Diffstat (limited to 'python/src/vectors.pxi')
-rw-r--r--  python/src/vectors.pxi  75
1 file changed, 50 insertions, 25 deletions
diff --git a/python/src/vectors.pxi b/python/src/vectors.pxi
index 74d3a0bd..fc0c365f 100644
--- a/python/src/vectors.pxi
+++ b/python/src/vectors.pxi
@@ -1,16 +1,20 @@
 from cython.operator cimport preincrement as pinc
 
 cdef class DenseVector:
-    cdef vector[weight_t]* vector
+    cdef vector[weight_t]* vector # Not owned by DenseVector
+
+    def __len__(self):
+        return self.vector.size()
 
     def __getitem__(self, char* fname):
-        cdef unsigned fid = FDConvert(fname)
-        if fid <= self.vector.size():
+        cdef int fid = FDConvert(fname)
+        if 0 <= fid < self.vector.size():
             return self.vector[0][fid]
         raise KeyError(fname)
 
     def __setitem__(self, char* fname, float value):
-        cdef unsigned fid = FDConvert(<char *>fname)
+        cdef int fid = FDConvert(<char *>fname)
+        if fid < 0: raise KeyError(fname)
         if self.vector.size() <= fid:
             self.vector.resize(fid + 1)
         self.vector[0][fid] = value
@@ -32,40 +36,43 @@ cdef class DenseVector:
 cdef class SparseVector:
     cdef FastSparseVector[weight_t]* vector
 
+    def __dealloc__(self):
+        del self.vector
+
+    def copy(self):
+        return self * 1
+
     def __getitem__(self, char* fname):
-        cdef unsigned fid = FDConvert(fname)
+        cdef int fid = FDConvert(fname)
+        if fid < 0: raise KeyError(fname)
         return self.vector.value(fid)
 
     def __setitem__(self, char* fname, float value):
-        cdef unsigned fid = FDConvert(<char *>fname)
+        cdef int fid = FDConvert(<char *>fname)
+        if fid < 0: raise KeyError(fname)
         self.vector.set_value(fid, value)
 
     def __iter__(self):
         cdef FastSparseVector[weight_t].const_iterator* it = new FastSparseVector[weight_t].const_iterator(self.vector[0], False)
-        cdef str fname
-        for i in range(self.vector.size()):
-            fname = FDConvert(it[0].ptr().first).c_str()
-            yield (fname, it[0].ptr().second)
-            pinc(it[0])
+        try:
+            for i in range(self.vector.size()):
+                yield (FDConvert(it[0].ptr().first).c_str(), it[0].ptr().second)
+                pinc(it[0]) # ++it
+        finally:
+            del it
 
     def dot(self, other):
         if isinstance(other, DenseVector):
             return self.vector.dot((<DenseVector> other).vector[0])
         elif isinstance(other, SparseVector):
             return self.vector.dot((<SparseVector> other).vector[0])
-        raise ValueError('cannot take the dot product of %s and SparseVector' % type(other))
-
-    def todense(self):
-        cdef DenseVector dense = DenseVector()
-        dense.vector = new vector[weight_t]()
-        self.vector.init_vector(dense.vector)
-        return dense
+        raise TypeError('cannot take the dot product of %s and SparseVector' % type(other))
 
-    def __richcmp__(SparseVector self, SparseVector other, int op):
+    def __richcmp__(SparseVector x, SparseVector y, int op):
         if op == 2: # ==
-            return self.vector[0] == other.vector[0]
+            return x.vector[0] == y.vector[0]
         elif op == 3: # !=
-            return not (self == other)
+            return not (x == y)
         raise NotImplemented('comparison not implemented for SparseVector')
 
     def __len__(self):
@@ -90,12 +97,30 @@ cdef class SparseVector:
         self.vector[0] /= scalar
         return self
 
-    def __add__(SparseVector self, SparseVector other):
+    def __add__(SparseVector x, SparseVector y):
+        cdef SparseVector result = SparseVector()
+        result.vector = new FastSparseVector[weight_t](x.vector[0] + y.vector[0])
+        return result
+
+    def __sub__(SparseVector x, SparseVector y):
+        cdef SparseVector result = SparseVector()
+        result.vector = new FastSparseVector[weight_t](x.vector[0] - y.vector[0])
+        return result
+
+    def __mul__(x, y):
+        cdef SparseVector vector
+        cdef float scalar
+        if isinstance(x, SparseVector): vector, scalar = x, y
+        else: vector, scalar = y, x
         cdef SparseVector result = SparseVector()
-        result.vector = new FastSparseVector[weight_t](self.vector[0] + other.vector[0])
+        result.vector = new FastSparseVector[weight_t](vector.vector[0] * scalar)
         return result
 
-    def __sub__(SparseVector self, SparseVector other):
+    def __div__(x, y):
+        cdef SparseVector vector
+        cdef float scalar
+        if isinstance(x, SparseVector): vector, scalar = x, y
+        else: vector, scalar = y, x
         cdef SparseVector result = SparseVector()
-        result.vector = new FastSparseVector[weight_t](self.vector[0] - other.vector[0])
+        result.vector = new FastSparseVector[weight_t](vector.vector[0] / scalar)
         return result
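The hunks above round out the Python-facing API: DenseVector gains __len__ and bounds-checked item access, while SparseVector gains cleanup in __dealloc__, a copy() helper, leak-free iteration, and full arithmetic operators. Below is a minimal usage sketch, not part of the commit; it assumes fvector is a SparseVector and weights is a DenseVector already obtained from the compiled module (say, a hypothesis feature vector and the decoder's weight vector), and the variable names are illustrative only.

    score = fvector.dot(weights)          # dot() accepts either a DenseVector or a SparseVector
    scaled = fvector * 0.5                # __mul__ accepts the scalar on either side
    halved = fvector / 2.0                # __div__ dispatches on whichever operand is the SparseVector
    combined = scaled + halved - fvector  # __add__/__sub__ allocate and return fresh vectors
    backup = fvector.copy()               # copy() is implemented as self * 1
    feats = dict(fvector)                 # __iter__ yields (feature name, value) pairs
    same = (backup == fvector)            # __richcmp__ implements == and !=
    nonzero = len(fvector)                # __len__ reports the number of stored features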