author    Patrick Simianer <simianer@cl.uni-heidelberg.de>  2012-07-08 14:26:51 +0200
committer Patrick Simianer <simianer@cl.uni-heidelberg.de>  2012-07-08 14:26:51 +0200
commit    c139ce495861bb341e1b86a85ad4559f9ad53c14 (patch)
tree      1071839ee458f21f169ce06fc536fefe07e4c65d /python/src/vectors.pxi
parent    3a94ac22e5c60aa205f2b3dadf81b0666500e0c3 (diff)
parent    d01e5b66d3010d61b9b56301fd7f302dd4ea5bc8 (diff)
Merge branch 'master' of github.com:pks/cdec-dtrain
Diffstat (limited to 'python/src/vectors.pxi')
-rw-r--r--    python/src/vectors.pxi    81
1 file changed, 56 insertions(+), 25 deletions(-)
diff --git a/python/src/vectors.pxi b/python/src/vectors.pxi
index 74d3a0bd..ce95968c 100644
--- a/python/src/vectors.pxi
+++ b/python/src/vectors.pxi
@@ -1,16 +1,20 @@
 from cython.operator cimport preincrement as pinc
 
 cdef class DenseVector:
-    cdef vector[weight_t]* vector
+    cdef vector[weight_t]* vector # Not owned by DenseVector
+
+    def __len__(self):
+        return self.vector.size()
 
     def __getitem__(self, char* fname):
-        cdef unsigned fid = FDConvert(fname)
-        if fid <= self.vector.size():
+        cdef int fid = FDConvert(fname)
+        if 0 <= fid < self.vector.size():
             return self.vector[0][fid]
         raise KeyError(fname)
 
     def __setitem__(self, char* fname, float value):
-        cdef unsigned fid = FDConvert(<char *>fname)
+        cdef int fid = FDConvert(<char *>fname)
+        if fid < 0: raise KeyError(fname)
         if self.vector.size() <= fid:
             self.vector.resize(fid + 1)
         self.vector[0][fid] = value
@@ -32,40 +36,43 @@ cdef class DenseVector:
 cdef class SparseVector:
     cdef FastSparseVector[weight_t]* vector
 
+    def __dealloc__(self):
+        del self.vector
+
+    def copy(self):
+        return self * 1
+
     def __getitem__(self, char* fname):
-        cdef unsigned fid = FDConvert(fname)
+        cdef int fid = FDConvert(fname)
+        if fid < 0: raise KeyError(fname)
         return self.vector.value(fid)
 
     def __setitem__(self, char* fname, float value):
-        cdef unsigned fid = FDConvert(<char *>fname)
+        cdef int fid = FDConvert(<char *>fname)
+        if fid < 0: raise KeyError(fname)
         self.vector.set_value(fid, value)
 
     def __iter__(self):
         cdef FastSparseVector[weight_t].const_iterator* it = new FastSparseVector[weight_t].const_iterator(self.vector[0], False)
-        cdef str fname
-        for i in range(self.vector.size()):
-            fname = FDConvert(it[0].ptr().first).c_str()
-            yield (fname, it[0].ptr().second)
-            pinc(it[0])
+        try:
+            for i in range(self.vector.size()):
+                yield (FDConvert(it[0].ptr().first).c_str(), it[0].ptr().second)
+                pinc(it[0]) # ++it
+        finally:
+            del it
 
     def dot(self, other):
         if isinstance(other, DenseVector):
             return self.vector.dot((<DenseVector> other).vector[0])
         elif isinstance(other, SparseVector):
             return self.vector.dot((<SparseVector> other).vector[0])
-        raise ValueError('cannot take the dot product of %s and SparseVector' % type(other))
-
-    def todense(self):
-        cdef DenseVector dense = DenseVector()
-        dense.vector = new vector[weight_t]()
-        self.vector.init_vector(dense.vector)
-        return dense
+        raise TypeError('cannot take the dot product of %s and SparseVector' % type(other))
 
-    def __richcmp__(SparseVector self, SparseVector other, int op):
+    def __richcmp__(SparseVector x, SparseVector y, int op):
         if op == 2: # ==
-            return self.vector[0] == other.vector[0]
+            return x.vector[0] == y.vector[0]
         elif op == 3: # !=
-            return not (self == other)
+            return not (x == y)
         raise NotImplemented('comparison not implemented for SparseVector')
 
     def __len__(self):
@@ -73,6 +80,12 @@ cdef class SparseVector:
 
     def __contains__(self, char* fname):
         return self.vector.nonzero(FDConvert(fname))
+
+    def __neg__(self):
+        cdef SparseVector result = SparseVector()
+        result.vector = new FastSparseVector[weight_t](self.vector[0])
+        result.vector[0] *= -1.0
+        return result
 
     def __iadd__(SparseVector self, SparseVector other):
         self.vector[0] += other.vector[0]
@@ -90,12 +103,30 @@ cdef class SparseVector:
         self.vector[0] /= scalar
         return self
 
-    def __add__(SparseVector self, SparseVector other):
+    def __add__(SparseVector x, SparseVector y):
+        cdef SparseVector result = SparseVector()
+        result.vector = new FastSparseVector[weight_t](x.vector[0] + y.vector[0])
+        return result
+
+    def __sub__(SparseVector x, SparseVector y):
+        cdef SparseVector result = SparseVector()
+        result.vector = new FastSparseVector[weight_t](x.vector[0] - y.vector[0])
+        return result
+
+    def __mul__(x, y):
+        cdef SparseVector vector
+        cdef float scalar
+        if isinstance(x, SparseVector): vector, scalar = x, y
+        else: vector, scalar = y, x
         cdef SparseVector result = SparseVector()
-        result.vector = new FastSparseVector[weight_t](self.vector[0] + other.vector[0])
+        result.vector = new FastSparseVector[weight_t](vector.vector[0] * scalar)
         return result
 
-    def __sub__(SparseVector self, SparseVector other):
+    def __div__(x, y):
+        cdef SparseVector vector
+        cdef float scalar
+        if isinstance(x, SparseVector): vector, scalar = x, y
+        else: vector, scalar = y, x
         cdef SparseVector result = SparseVector()
-        result.vector = new FastSparseVector[weight_t](self.vector[0] - other.vector[0])
+        result.vector = new FastSparseVector[weight_t](vector.vector[0] / scalar)
        return result
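
For orientation, a minimal usage sketch of the bindings as they look after this merge follows. It exercises only the operations defined in the diff above (length, item access by feature name, iteration, dot, copy, negation, scalar and vector arithmetic); how a DenseVector of weights and a SparseVector of features are obtained from the cdec Python API, and the 'LanguageModel' feature name, are illustrative assumptions rather than part of this commit.

def summarize(weights, feats):
    # `weights`: DenseVector, `feats`: SparseVector -- assumed to be handed out
    # by the surrounding cdec Python bindings; this function is illustrative only.
    try:
        lm = weights['LanguageModel']   # hypothetical feature name; KeyError if out of range
    except KeyError:
        lm = 0.0

    # SparseVector iterates as (feature_name, value) pairs.
    nonzero = dict((fname, fval) for fname, fval in feats)

    scaled   = feats * 0.5              # __mul__ accepts the scalar on either side
    zeroed   = feats + (-feats)         # __add__ / __neg__ return new SparseVectors
    snapshot = feats.copy()             # implemented above as `self * 1`

    score = feats.dot(weights)          # dot against a DenseVector (or another SparseVector)
    return lm, len(nonzero), score, scaled, zeroed, snapshot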