author    graehl <graehl@ec762483-ff6d-05da-a07a-a48fb63a330f>  2010-08-11 02:46:13 +0000
committer graehl <graehl@ec762483-ff6d-05da-a07a-a48fb63a330f>  2010-08-11 02:46:13 +0000
commit    82384b4ec365f3d2ad2c9bca078a0b38d4be09c0 (patch)
tree      f6f015f2791ec9b1ec94c90c98b09c0d251aee9e
parent    e4e66118c14704509e214aa32689ef304ae5ada2 (diff)
merge
git-svn-id: https://ws10smt.googlecode.com/svn/trunk@511 ec762483-ff6d-05da-a07a-a48fb63a330f
-rwxr-xr-x  decoder/batched_append.h  |  25
-rwxr-xr-x  decoder/cfg.cc            |  44
-rwxr-xr-x  decoder/cfg.h             |  29
-rwxr-xr-x  decoder/hash.h            |  54
-rw-r--r--  decoder/small_vector.h    | 265
-rw-r--r--  decoder/sparse_vector.h   | 512
-rwxr-xr-x  utils/hash.h              |   6
-rw-r--r--  utils/small_vector.h      |  10
-rwxr-xr-x  utils/swap_pod.h          |  23
9 files changed, 967 insertions(+), 1 deletion(-)
diff --git a/decoder/batched_append.h b/decoder/batched_append.h
new file mode 100755
index 00000000..745f567f
--- /dev/null
+++ b/decoder/batched_append.h
@@ -0,0 +1,25 @@
+#ifndef BATCHED_APPEND_H
+#define BATCHED_APPEND_H
+
+#include <algorithm> //swap
+#include <cstddef>
+
+template <class SRange,class Vector>
+void batched_append(Vector &v,SRange const& s) {
+ std::size_t news=v.size()+s.size();
+ v.reserve(news);
+ v.insert(v.end(),s.begin(),s.end());
+}
+
+template <class SRange,class Vector>
+void batched_append_swap(Vector &v,SRange & s) {
+ using namespace std; // to find the right swap
+ size_t i=v.size();
+ size_t news=i+s.size();
+ v.resize(news);
+ typename SRange::iterator si=s.begin();
+ for (;i<news;++i,++si)
+ swap(v[i],*si);
+}
+
+#endif
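
For illustration (not part of the patch), a minimal usage sketch of the two helpers above: batched_append copies a range onto the end of a vector after a single reserve, while batched_append_swap grows the destination once and then swaps each source element in, leaving the source elements holding the freshly default-constructed values they were swapped with.

// usage sketch, not part of the commit; assumes std::vector on both sides
#include <vector>
#include <string>
#include <cassert>
#include "batched_append.h"

int main() {
  std::vector<std::string> dst(1, "a"), src(2, "b");
  batched_append(dst, src);        // dst: a b b ; src unchanged
  assert(dst.size() == 3 && src.size() == 2);

  std::vector<std::string> more(2, "c");
  batched_append_swap(dst, more);  // dst: a b b c c ; more now holds two empty strings
  assert(dst.size() == 5 && more[0].empty());
  return 0;
}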
diff --git a/decoder/cfg.cc b/decoder/cfg.cc
index f899765e..74e23cb6 100755
--- a/decoder/cfg.cc
+++ b/decoder/cfg.cc
@@ -2,9 +2,20 @@
#include "hg.h"
#include "cfg_format.h"
#include "cfg_binarize.h"
+#include "hash.h"
+#include "batched_append.h"
+#include <limits>
+#include "fast_lexical_cast.hpp"
using namespace std;
+namespace {
+CFG::BinRhs nullrhs(std::numeric_limits<int>::min(),std::numeric_limits<int>::min());
+}
+WordID CFG::BinName(BinRhs const& b)
+{
+ return TD::Convert(lexical_cast<string>(b.first)+"+"+lexical_cast<string>(b.second));
+}
void CFG::Binarize(CFGBinarize const& b) {
if (!b.Binarizing()) return;
@@ -14,7 +25,38 @@ void CFG::Binarize(CFGBinarize const& b) {
}
// l2r only so far:
cerr << "Binarizing "<<b<<endl;
- //TODO.
+ HASH_MAP<BinRhs,NTHandle,boost::hash<BinRhs> > bin2lhs; // we're going to hash cons rather than build an explicit trie from right to left.
+ HASH_MAP_EMPTY(bin2lhs,nullrhs);
+ int rhsmin=b.bin_unary?0:1;
+ // iterate using indices and not iterators because we'll be adding to both nodes and edge list. we could instead pessimistically reserve space for both, but this is simpler. also: store original end of nts since we won't need to reprocess newly added ones.
+ NTs new_nts; // these will be appended at the end, so we don't have to worry about iterator invalidation
+ Rules new_rules;
+ //TODO: this could be factored easily into in-place (append to new_* like below) and functional (nondestructive copy) versions (copy orig to target and append to target)
+ int newnt=nts.size();
+ int newruleid=rules.size();
+ BinRhs bin;
+ for (NTs::const_iterator n=nts.begin(),nn=nts.end();n!=nn;++n) {
+ NT const& nt=*n;
+ for (Ruleids::const_iterator ir=nt.ruleids.begin(),er=nt.ruleids.end();ir!=er;++ir) {
+ RHS &rhs=rules[*ir].rhs; // ruleids index into rules; we're going to binarize this rhs in place while adding newly created rules to new_...
+ if (rhs.empty()) continue;
+ bin.second=rhs.back();
+ for (int r=rhs.size()-2;r>=rhsmin;--r) { // pairs from right to left (normally we leave the last pair alone)
+ rhs.pop_back();
+ bin.first=rhs[r];
+ if (newnt==(bin.second=(get_default(bin2lhs,bin,newnt)))) {
+ new_nts.push_back(NT());
+ new_nts.back().ruleids.push_back(newruleid);
+ new_rules.push_back(Rule(newnt,bin));
+ if (b.bin_name_nts)
+ new_nts.back().from.nt=BinName(bin);
+ ++newnt;++newruleid;
+ }
+ }
+ }
+ }
+ batched_append_swap(nts,new_nts);
+ batched_append_swap(rules,new_rules);
}
namespace {
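
The Binarize loop above hash-conses shared right suffixes: each (left symbol, already-binarized right part) pair is looked up in bin2lhs via get_default, and a fresh nonterminal (plus a new rule for it) is minted only the first time that pair is seen, so identical suffixes across rules reuse one binarized nonterminal. A toy sketch of the same right-to-left pairing with plain std::map and int symbols (simplified stand-ins, not the CFG/HASH_MAP types used above):

// toy sketch of hash-consed right-to-left binarization (simplified types)
#include <map>
#include <vector>
#include <iostream>

typedef std::pair<int,int> BinRhs;

int main() {
  std::map<BinRhs,int> bin2lhs;            // stands in for HASH_MAP + get_default
  int next_nt = 100;                       // next fresh nonterminal id
  int syms[] = {1,2,3,4};                  // RHS of one rule: 1 2 3 4
  std::vector<int> rhs(syms, syms+4);
  int right = rhs.back();
  for (int r = (int)rhs.size()-2; r >= 1; --r) { // leave the leftmost pair alone, as with rhsmin=1
    BinRhs bin(rhs[r], right);
    int &lhs = bin2lhs.insert(std::make_pair(bin, next_nt)).first->second; // get_default idiom
    if (lhs == next_nt) ++next_nt;         // pair was new: here Binarize pushes a new NT and Rule
    right = lhs;                           // the (possibly shared) NT becomes the right child of the next pair
  }
  std::cout << "rule rhs becomes: " << rhs[0] << " " << right << "\n"; // 1 101
  return 0;
}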
diff --git a/decoder/cfg.h b/decoder/cfg.h
index a390ece9..19d30f8b 100755
--- a/decoder/cfg.h
+++ b/decoder/cfg.h
@@ -27,6 +27,7 @@
//#include "int_or_pointer.h"
#include "small_vector.h"
#include "nt_span.h"
+#include <algorithm>
class Hypergraph;
class CFGFormat; // #include "cfg_format.h"
@@ -42,7 +43,16 @@ struct CFG {
o << nts[n].from;
}
+ typedef std::pair<int,int> BinRhs;
+ WordID BinName(BinRhs const& b);
+
struct Rule {
+ // for binarizing - no costs/probs
+ Rule(int lhs,BinRhs const& binrhs) : lhs(lhs),rhs(2),p(1) {
+ rhs[0]=binrhs.first;
+ rhs[1]=binrhs.second;
+ }
+
int lhs; // index into nts
RHS rhs;
prob_t p; // h unused for now (there's nothing admissable, and p is already using 1st pass inside as pushed toward top)
@@ -50,11 +60,26 @@ struct CFG {
#if CFG_DEBUG
TRulePtr rule; // normally no use for this (waste of space)
#endif
+ void Swap(Rule &o) {
+ using namespace std;
+ swap(lhs,o.lhs);
+ swap(rhs,o.rhs);
+ swap(p,o.p);
+ swap(f,o.f);
+#if CFG_DEBUG
+ swap(rule,o.rule);
+#endif
+ }
};
struct NT {
Ruleids ruleids; // index into CFG rules with lhs = this NT. aka in_edges_
NTSpan from; // optional name - still needs id to disambiguate
+ void Swap(NT &o) {
+ using namespace std;
+ swap(ruleids,o.ruleids);
+ swap(from,o.from);
+ }
};
CFG() : hg_() { uninit=true; }
@@ -92,5 +117,9 @@ protected:
int goal_nt;
};
+inline void swap(CFG &a,CFG &b) {
+ a.Swap(b);
+}
+
#endif
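
The Swap members on Rule and NT plus the free swap(CFG&,CFG&) overload above follow the usual member-Swap idiom: generic code that calls swap(a,b) unqualified (or brings std::swap in with a using-declaration) picks up the cheap member-wise exchange through ADL instead of three deep copies. A standalone illustration of the pattern on a hypothetical type, not taken from the commit:

// member-Swap + free-swap idiom, illustrated on a hypothetical type
#include <vector>
#include <algorithm>
#include <iostream>

struct Bag {
  std::vector<int> items;
  void Swap(Bag &o) { items.swap(o.items); }    // O(1): exchanges the vectors' internal buffers
};
inline void swap(Bag &a, Bag &b) { a.Swap(b); } // found by ADL / unqualified lookup

int main() {
  Bag a, b;
  a.items.assign(3, 7);
  using std::swap;     // the usual generic-code pattern: using + unqualified call
  swap(a, b);          // selects the Bag overload, not std::swap's copy-based version
  std::cout << b.items.size() << "\n";  // 3
  return 0;
}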
diff --git a/decoder/hash.h b/decoder/hash.h
new file mode 100755
index 00000000..3a60a429
--- /dev/null
+++ b/decoder/hash.h
@@ -0,0 +1,54 @@
+#ifndef CDEC_HASH_H
+#define CDEC_HASH_H
+
+#include "murmur_hash.h"
+
+#include "config.h"
+#ifdef HAVE_SPARSEHASH
+# include <google/dense_hash_map>
+# define HASH_MAP google::dense_hash_map
+# define HASH_MAP_RESERVED(h,empty,deleted) do { h.set_empty_key(empty); h.set_deleted_key(deleted); } while(0)
+# define HASH_MAP_EMPTY(h,empty) do { h.set_empty_key(empty); } while(0)
+#else
+# include <tr1/unordered_map>
+# define HASH_MAP std::tr1::unordered_map
+# define HASH_MAP_RESERVED(h,empty,deleted)
+# define HASH_MAP_EMPTY(h,empty)
+#endif
+
+#include <boost/functional/hash.hpp>
+
+// assumes C is POD
+template <class C>
+struct murmur_hash
+{
+ typedef MurmurInt return_type;
+ typedef C /*const&*/ argument_type;
+ return_type operator()(argument_type const& c) const {
+ return MurmurHash((void*)&c,sizeof(c));
+ }
+};
+
+// murmur_hash_array isn't std guaranteed safe (you need to use string::data())
+template <>
+struct murmur_hash<std::string>
+{
+ typedef MurmurInt return_type;
+ typedef std::string /*const&*/ argument_type;
+ return_type operator()(argument_type const& c) const {
+ return MurmurHash(c.data(),c.size());
+ }
+};
+
+// uses begin(),size() assuming contiguous layout and POD
+template <class C>
+struct murmur_hash_array
+{
+ typedef MurmurInt return_type;
+ typedef C /*const&*/ argument_type;
+ return_type operator()(argument_type const& c) const {
+ return MurmurHash(&*c.begin(),c.size()*sizeof(*c.begin()));
+ }
+};
+
+#endif
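
A minimal usage sketch for the header above (not part of the patch; assumes config.h and murmur_hash.h resolve on the include path): HASH_MAP expands to google::dense_hash_map when sparsehash is available and to std::tr1::unordered_map otherwise, and HASH_MAP_EMPTY/HASH_MAP_RESERVED become no-ops in the latter case, so the same declaration works either way.

// usage sketch, not part of the commit
#include "hash.h"
#include <string>
#include <iostream>

int main() {
  HASH_MAP<std::string, int, murmur_hash<std::string> > counts;
  HASH_MAP_EMPTY(counts, std::string("")); // dense_hash_map needs an empty key; no-op for unordered_map
  counts["cat"] += 1;
  counts["dog"] += 2;
  std::cout << counts["dog"] << "\n";      // 2
  return 0;
}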
diff --git a/decoder/small_vector.h b/decoder/small_vector.h
new file mode 100644
index 00000000..25c52359
--- /dev/null
+++ b/decoder/small_vector.h
@@ -0,0 +1,265 @@
+#ifndef _SMALL_VECTOR_H_
+#define _SMALL_VECTOR_H_
+
+/* REQUIRES that T is POD (can be memcpy). won't work (yet) due to union with SMALL_VECTOR_POD==0 - may be possible to handle movable types that have ctor/dtor, by using explicit allocation, ctor/dtor calls. but for now JUST USE THIS FOR no-meaningful ctor/dtor POD types.
+
+ stores small element (<=SV_MAX items) vectors inline. recommend SV_MAX=sizeof(T)/sizeof(T*)>1?sizeof(T)/sizeof(T*):1. may not work if SV_MAX==0.
+ */
+
+#define SMALL_VECTOR_POD 1
+
+#include <algorithm> // std::max
+#include <cstring>
+#include <cassert>
+#include <stdint.h>
+#include <new>
+//sizeof(T)/sizeof(T*)>1?sizeof(T)/sizeof(T*):1
+
+template <class T,int SV_MAX=2>
+class SmallVector {
+// typedef unsigned short uint16_t;
+ public:
+ typedef SmallVector<T,SV_MAX> Self;
+ SmallVector() : size_(0) {}
+
+ typedef T const* const_iterator;
+ typedef T* iterator;
+ typedef T value_type;
+ typedef T &reference;
+ typedef T const& const_reference;
+
+ T *begin() { return size_>SV_MAX?data_.ptr:data_.vals; }
+ T const* begin() const { return const_cast<Self*>(this)->begin(); }
+ T *end() { return begin()+size_; }
+ T const* end() const { return begin()+size_; }
+
+ explicit SmallVector(size_t s) : size_(s) {
+ assert(s < 0xA000);
+ if (s <= SV_MAX) {
+ for (int i = 0; i < s; ++i) new(&data_.vals[i]) T();
+ } else {
+ capacity_ = s;
+ size_ = s;
+ data_.ptr = new T[s]; // TODO: replace this with allocator or ::operator new(sizeof(T)*s) everywhere
+ for (int i = 0; i < size_; ++i) new(&data_.ptr[i]) T();
+ }
+ }
+
+ SmallVector(size_t s, T const& v) : size_(s) {
+ assert(s < 0xA000);
+ if (s <= SV_MAX) {
+ for (int i = 0; i < s; ++i) data_.vals[i] = v;
+ } else {
+ capacity_ = s;
+ size_ = s;
+ data_.ptr = new T[s];
+ for (int i = 0; i < size_; ++i) data_.ptr[i] = v;
+ }
+ }
+
+ SmallVector(const Self& o) : size_(o.size_) {
+ if (size_ <= SV_MAX) {
+ std::memcpy(data_.vals,o.data_.vals,size_*sizeof(T));
+// for (int i = 0; i < size_; ++i) data_.vals[i] = o.data_.vals[i];
+ } else {
+ capacity_ = size_ = o.size_;
+ data_.ptr = new T[capacity_];
+ std::memcpy(data_.ptr, o.data_.ptr, size_ * sizeof(T));
+ }
+ }
+
+ const Self& operator=(const Self& o) {
+ if (size_ <= SV_MAX) {
+ if (o.size_ <= SV_MAX) {
+ size_ = o.size_;
+ for (int i = 0; i < SV_MAX; ++i) data_.vals[i] = o.data_.vals[i];
+ } else {
+ capacity_ = size_ = o.size_;
+ data_.ptr = new T[capacity_];
+ std::memcpy(data_.ptr, o.data_.ptr, size_ * sizeof(T));
+ }
+ } else {
+ if (o.size_ <= SV_MAX) {
+ delete[] data_.ptr;
+ size_ = o.size_;
+ for (int i = 0; i < size_; ++i) data_.vals[i] = o.data_.vals[i];
+ } else {
+ if (capacity_ < o.size_) {
+ delete[] data_.ptr;
+ capacity_ = o.size_;
+ data_.ptr = new T[capacity_];
+ }
+ size_ = o.size_;
+ for (int i = 0; i < size_; ++i)
+ data_.ptr[i] = o.data_.ptr[i];
+ }
+ }
+ return *this;
+ }
+
+ ~SmallVector() {
+ if (size_ <= SV_MAX) {
+ // skip if pod? yes, we required pod anyway. no need to destruct
+#if !SMALL_VECTOR_POD
+ for (int i=0;i<size_;++i) data_.vals[i].~T();
+#endif
+ } else
+ delete[] data_.ptr;
+ }
+
+ void clear() {
+ if (size_ > SV_MAX) {
+ delete[] data_.ptr;
+ }
+ size_ = 0;
+ }
+
+ bool empty() const { return size_ == 0; }
+ size_t size() const { return size_; }
+
+ inline void ensure_capacity(uint16_t min_size) {
+ assert(min_size > SV_MAX);
+ if (min_size < capacity_) return;
+ uint16_t new_cap = std::max(static_cast<uint16_t>(capacity_ << 1), min_size);
+ T* tmp = new T[new_cap];
+ std::memcpy(tmp, data_.ptr, capacity_ * sizeof(T));
+ delete[] data_.ptr;
+ data_.ptr = tmp;
+ capacity_ = new_cap;
+ }
+
+private:
+ inline void copy_vals_to_ptr() {
+ capacity_ = SV_MAX * 2;
+ T* tmp = new T[capacity_];
+ for (int i = 0; i < SV_MAX; ++i) tmp[i] = data_.vals[i];
+ data_.ptr = tmp;
+ }
+ inline void ptr_to_small() {
+ assert(size_<=SV_MAX);
+ T *tmp=data_.ptr;
+ for (int i=0;i<size_;++i)
+ data_.vals[i]=tmp[i];
+ delete[] tmp;
+ }
+
+public:
+
+ inline void push_back(T const& v) {
+ if (size_ < SV_MAX) {
+ data_.vals[size_] = v;
+ ++size_;
+ return;
+ } else if (size_ == SV_MAX) {
+ copy_vals_to_ptr();
+ } else if (size_ == capacity_) {
+ ensure_capacity(size_ + 1);
+ }
+ data_.ptr[size_] = v;
+ ++size_;
+ }
+
+ T& back() { return this->operator[](size_ - 1); }
+ const T& back() const { return this->operator[](size_ - 1); }
+ T& front() { return this->operator[](0); }
+ const T& front() const { return this->operator[](0); }
+
+ void pop_back() {
+ assert(size_>0);
+ --size_;
+ if (size_==SV_MAX)
+ ptr_to_small();
+ }
+
+ void compact() {
+ compact(size_);
+ }
+
+ // size must be <= size_ - TODO: test
+ void compact(uint16_t size) {
+ assert(size<=size_);
+ if (size_>SV_MAX) {
+ size_=size;
+ if (size<=SV_MAX)
+ ptr_to_small();
+ } else
+ size_=size;
+ }
+
+ void resize(size_t s, int v = 0) {
+ if (s <= SV_MAX) {
+ if (size_ > SV_MAX) {
+ T *tmp=data_.ptr;
+ for (int i = 0; i < s; ++i) data_.vals[i] = tmp[i];
+ delete[] tmp;
+ size_ = s;
+ return;
+ }
+ if (s <= size_) {
+ size_ = s;
+ return;
+ } else {
+ for (int i = size_; i < s; ++i)
+ data_.vals[i] = v;
+ size_ = s;
+ return;
+ }
+ } else {
+ if (size_ <= SV_MAX)
+ copy_vals_to_ptr();
+ if (s > capacity_)
+ ensure_capacity(s);
+ if (s > size_) {
+ for (int i = size_; i < s; ++i)
+ data_.ptr[i] = v;
+ }
+ size_ = s;
+ }
+ }
+
+ T& operator[](size_t i) {
+ if (size_ <= SV_MAX) return data_.vals[i];
+ return data_.ptr[i];
+ }
+
+ const T& operator[](size_t i) const {
+ if (size_ <= SV_MAX) return data_.vals[i];
+ return data_.ptr[i];
+ }
+
+ bool operator==(const Self& o) const {
+ if (size_ != o.size_) return false;
+ if (size_ <= SV_MAX) {
+ for (size_t i = 0; i < size_; ++i)
+ if (data_.vals[i] != o.data_.vals[i]) return false;
+ return true;
+ } else {
+ for (size_t i = 0; i < size_; ++i)
+ if (data_.ptr[i] != o.data_.ptr[i]) return false;
+ return true;
+ }
+ }
+
+ friend bool operator!=(const Self& a, const Self& b) {
+ return !(a==b);
+ }
+
+ private:
+ union StorageType {
+ T vals[SV_MAX];
+ T* ptr;
+ };
+ StorageType data_;
+ uint16_t size_;
+ uint16_t capacity_; // only meaningful when size_ > SV_MAX
+};
+
+typedef SmallVector<int,2> SmallVectorInt;
+
+template <class T,int N>
+void memcpy(void *out,SmallVector<T,N> const& v) {
+ std::memcpy(out,v.begin(),v.size()*sizeof(T));
+}
+
+#endif
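
SmallVector above keeps up to SV_MAX elements inline in a union and spills to a heap array past that; size_ and capacity_ are uint16_t, so it is intended for short sequences of POD elements only. A small usage sketch (not part of the patch), assuming the header compiles on its own:

// usage sketch for SmallVector (POD element type, as the header requires)
#include <cassert>
#include "small_vector.h"

int main() {
  SmallVectorInt v;              // SmallVector<int,2>: first two ints live inline, no allocation
  v.push_back(10);
  v.push_back(20);
  v.push_back(30);               // third push spills to a heap buffer (copy_vals_to_ptr)
  assert(v.size() == 3 && v[2] == 30);
  v.pop_back();                  // dropping back to SV_MAX moves storage back inline (ptr_to_small)
  assert(v.size() == 2 && v.back() == 20);
  return 0;
}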
diff --git a/decoder/sparse_vector.h b/decoder/sparse_vector.h
new file mode 100644
index 00000000..207489c5
--- /dev/null
+++ b/decoder/sparse_vector.h
@@ -0,0 +1,512 @@
+#ifndef _SPARSE_VECTOR_H_
+#define _SPARSE_VECTOR_H_
+
+//#define SPARSE_VECTOR_HASH
+
+#ifdef SPARSE_VECTOR_HASH
+#include "hash.h"
+# define SPARSE_VECTOR_MAP HASH_MAP
+# define SPARSE_VECTOR_MAP_RESERVED(h,empty,deleted) HASH_MAP_RESERVED(h,empty,deleted)
+#else
+# define SPARSE_VECTOR_MAP std::map
+# define SPARSE_VECTOR_MAP_RESERVED(h,empty,deleted)
+#endif
+/*
+ use SparseVectorList (pair smallvector) for feat funcs / hypergraphs (you rarely need random access; just append a feature to the list)
+*/
+/* hack: index 0 never gets printed because cdyer is creative and efficient. features which have no weight got feature dict id 0, see, and the models all clobbered that value. nobody wants to see it. except that vlad is also creative and efficient and stored the oracle bleu there. */
+/* NOTE: zero vals may or may not be dropped from map (sparse, but not guaranteed to be so).
+
+ I rely on !v the same as !((bool)v) the same as v==0 and v() same as v(0).
+
+ one exception:
+
+ a local:
+ T sum = 0;
+ is used instead of
+ T sum;
+
+ because T may be a primitive type, and
+
+ T sum();
+
+ is parsed as a function decl :(
+
+ the alternative T sum=T() is also reasonable. i've switched to that.
+*/
+
+// this is a modified version of code originally written
+// by Phil Blunsom
+
+#include <iostream>
+#include <map>
+#include <tr1/unordered_map>
+#include <vector>
+#include <valarray>
+
+#include "fdict.h"
+#include "small_vector.h"
+
+template <class T>
+inline T & extend_vector(std::vector<T> &v,int i) {
+ if (i>=v.size())
+ v.resize(i+1);
+ return v[i];
+}
+
+template <typename T>
+class SparseVector {
+ void init_reserved() {
+ SPARSE_VECTOR_MAP_RESERVED(values_,-1,-2);
+ }
+public:
+ T const& get_singleton() const {
+ assert(values_.size()==1);
+ return values_.begin()->second;
+ }
+
+ typedef SparseVector<T> Self;
+ typedef SPARSE_VECTOR_MAP<int, T> MapType;
+ typedef typename MapType::const_iterator const_iterator;
+ SparseVector() {
+ init_reserved();
+ }
+ explicit SparseVector(std::vector<T> const& v) {
+ init_reserved();
+ typename MapType::iterator p=values_.begin();
+ const T z=0;
+ for (unsigned i=0;i<v.size();++i) {
+ T const& t=v[i];
+ if (t!=z)
+ p=values_.insert(p,typename MapType::value_type(i,t)); //hint makes insertion faster
+ }
+ }
+
+
+ void init_vector(std::vector<T> *vp) const {
+ init_vector(*vp);
+ }
+
+ void init_vector(std::vector<T> &v) const {
+ v.clear();
+ for (const_iterator i=values_.begin(),e=values_.end();i!=e;++i)
+ extend_vector(v,i->first)=i->second;
+ }
+
+ void set_new_value(int index, T const& val) {
+ assert(values_.find(index)==values_.end());
+ values_[index]=val;
+ }
+
+
+ // warning: exploits the fact that 0 values are always removed from map. change this if you change that.
+ bool nonzero(int index) const {
+ typename MapType::const_iterator found = values_.find(index);
+ return found!=values_.end() && found->second;
+ }
+
+
+ T get(int index) const {
+ typename MapType::const_iterator found = values_.find(index);
+ return found==values_.end()?T():found->second;
+ }
+
+ T value(int i) const { return get(i); }
+
+ // same as above but may add a 0 entry. TODO: check that people relying on no entry use get
+ T & operator[](int index){
+ return values_[index];
+ }
+
+ inline void set_value(int index, const T &value) {
+ values_[index] = value;
+ }
+
+ inline void maybe_add(int index, const T& value) {
+ if (value) add_value(index,value);
+ }
+
+ T& add_value(int index, const T &value) {
+#if 1
+ return values_[index]+=value;
+#else
+ // this is not really going to be any faster, and we already rely on default init = 0 init
+ std::pair<typename MapType::iterator,bool> art=values_.insert(std::make_pair(index,value));
+ T &val=art.first->second;
+ if (!art.second) val += value; // already existed
+ return val;
+#endif
+ }
+
+
+ void store(std::valarray<T>* target) const {
+ (*target) *= 0;
+ for (typename MapType::const_iterator
+ it = values_.begin(); it != values_.end(); ++it) {
+ if (it->first >= target->size()) break;
+ (*target)[it->first] = it->second;
+ }
+ }
+
+ int max_index() const {
+ if (empty()) return 0;
+ typename MapType::const_iterator found =values_.end();
+ --found;
+ return found->first;
+ }
+
+ // dot product with a unit vector of the same length
+ // as the sparse vector
+ T dot() const {
+ T sum = T();
+ for (typename MapType::const_iterator
+ it = values_.begin(); it != values_.end(); ++it)
+ sum += it->second;
+ return sum;
+ }
+
+ template<typename S>
+ S cosine_sim(const SparseVector<S> &vec) const {
+ return dot(vec)/(l2norm()*vec.l2norm());
+ }
+
+ // if values are binary, gives |A intersect B|/|A union B|
+ template<typename S>
+ S tanimoto_coef(const SparseVector<S> &vec) const {
+ S dp=dot(vec);
+ return dp/(l2norm_sq()+vec.l2norm_sq()-dp);
+ }
+
+ template<typename S>
+ S dot(const SparseVector<S> &vec) const {
+ S sum = S();
+ for (typename MapType::const_iterator
+ it = values_.begin(); it != values_.end(); ++it)
+ {
+ typename MapType::const_iterator
+ found = vec.values_.find(it->first);
+ if (found != vec.values_.end())
+ sum += it->second * found->second;
+ }
+ return sum;
+ }
+
+ template<typename S>
+ S dot(const std::vector<S> &vec) const {
+ S sum = S();
+ for (typename MapType::const_iterator
+ it = values_.begin(); it != values_.end(); ++it)
+ {
+ if (it->first < static_cast<int>(vec.size()))
+ sum += it->second * vec[it->first];
+ }
+ return sum;
+ }
+
+ template<typename S>
+ S dot(const S *vec) const {
+ // this is not range checked!
+ S sum = S();
+ for (typename MapType::const_iterator
+ it = values_.begin(); it != values_.end(); ++it)
+ sum += it->second * vec[it->first];
+ std::cout << "dot(*vec) " << sum << std::endl;
+ return sum;
+ }
+
+ T l1norm() const {
+ T sum = T();
+ for (typename MapType::const_iterator
+ it = values_.begin(); it != values_.end(); ++it)
+ sum += fabs(it->second);
+ return sum;
+ }
+
+ T l2norm_sq() const {
+ T sum = T();
+ for (typename MapType::const_iterator
+ it = values_.begin(); it != values_.end(); ++it)
+ sum += it->second * it->second;
+ return sum;
+ }
+
+ T l2norm() const {
+ return sqrt(l2norm_sq());
+ }
+
+ void erase(int key) {
+ values_.erase(key);
+/* typename MapType::iterator found = values_.find(key);
+ if (found!=values_end())
+ values_.erase(found);*/
+ }
+
+ template <class T2>
+ void set_from(SparseVector<T2> const& other) {
+ for (typename SparseVector<T2>::const_iterator
+ it = other.begin(); it != other.end(); ++it)
+ {
+ values_[it->first]=it->second;
+ }
+ }
+
+ SparseVector<T> &operator+=(const SparseVector<T> &other) {
+ for (typename MapType::const_iterator
+ it = other.values_.begin(); it != other.values_.end(); ++it)
+ {
+// T v =
+ (values_[it->first] += it->second);
+// if (!v) values_.erase(it->first);
+ }
+ return *this;
+ }
+
+ SparseVector<T> &operator-=(const SparseVector<T> &other) {
+ for (typename MapType::const_iterator
+ it = other.values_.begin(); it != other.values_.end(); ++it)
+ {
+// T v =
+ (values_[it->first] -= it->second);
+// if (!v) values_.erase(it->first);
+ }
+ return *this;
+ }
+
+ friend SparseVector<T> operator -(SparseVector<T> x,SparseVector<T> const& y) {
+ x-=y;
+ return x;
+ }
+ friend SparseVector<T> operator +(SparseVector<T> x,SparseVector<T> const& y) {
+ x+=y;
+ return x;
+ }
+
+private:
+ // DEPRECATED: because 0 values are dropped from the map, this doesn't even make sense if you have a fully populated (not really sparse re: what you'll ever use) vector
+ SparseVector<T> &operator-=(T const& x) {
+ for (typename MapType::iterator
+ it = values_.begin(); it != values_.end(); ++it)
+ it->second -= x;
+ return *this;
+ }
+
+ SparseVector<T> &operator+=(T const& x) {
+ for (typename MapType::iterator
+ it = values_.begin(); it != values_.end(); ++it)
+ it->second += x;
+ return *this;
+ }
+public:
+ SparseVector<T> &operator/=(const T &x) {
+ for (typename MapType::iterator
+ it = values_.begin(); it != values_.end(); ++it)
+ it->second /= x;
+ return *this;
+ }
+
+ SparseVector<T> &operator*=(const T& x) {
+ for (typename MapType::iterator
+ it = values_.begin(); it != values_.end(); ++it)
+ it->second *= x;
+ return *this;
+ }
+
+ SparseVector<T> operator+(T const& x) const {
+ SparseVector<T> result = *this;
+ return result += x;
+ }
+
+ SparseVector<T> operator-(T const& x) const {
+ SparseVector<T> result = *this;
+ return result -= x;
+ }
+
+ SparseVector<T> operator/(T const& x) const {
+ SparseVector<T> result = *this;
+ return result /= x;
+ }
+
+ std::ostream &operator<<(std::ostream& out) const {
+ Write(true, &out);
+ return out;
+ }
+
+ void Write(const bool with_semi, std::ostream* os) const {
+ bool first = true;
+ for (typename MapType::const_iterator
+ it = values_.begin(); it != values_.end(); ++it) {
+ // by definition feature id 0 is a dummy value
+ if (!it->first) continue;
+ if (with_semi) {
+ (*os) << (first ? "" : ";")
+ << FD::Convert(it->first) << '=' << it->second;
+ } else {
+ (*os) << (first ? "" : " ")
+ << FD::Convert(it->first) << '=' << it->second;
+ }
+ first = false;
+ }
+ }
+
+ bool operator==(Self const & other) const {
+ return size()==other.size() && contains_keys_of(other) && other.contains_i(*this);
+ }
+
+ bool contains(Self const &o) const {
+ return size()>=o.size() && contains_i(o);
+ }
+
+ bool at_equals(int i,T const& val) const {
+ const_iterator it=values_.find(i);
+ if (it==values_.end()) return !val;
+ return it->second==val;
+ }
+
+ bool contains_i(Self const& o) const {
+ for (typename MapType::const_iterator i=o.begin(),e=o.end();i!=e;++i)
+ if (!at_equals(i->first,i->second))
+ return false;
+ return true;
+ }
+
+ bool contains_keys_of(Self const& o) const {
+ for (typename MapType::const_iterator i=o.begin(),e=o.end();i!=e;++i)
+ if (values_.find(i->first)==values_.end())
+ return false;
+ return true;
+ }
+
+#ifndef SPARSE_VECTOR_HASH
+ bool operator<(const SparseVector<T> &other) const {
+ typename MapType::const_iterator it = values_.begin();
+ typename MapType::const_iterator other_it = other.values_.begin();
+
+ for (; it != values_.end() && other_it != other.values_.end(); ++it, ++other_it)
+ {
+ if (it->first < other_it->first) return true;
+ if (it->first > other_it->first) return false;
+ if (it->second < other_it->second) return true;
+ if (it->second > other_it->second) return false;
+ }
+ return values_.size() < other.values_.size();
+ }
+#endif
+
+ int size() const { return values_.size(); }
+
+ int num_active() const { return values_.size(); }
+ bool empty() const { return values_.empty(); }
+
+ const_iterator begin() const { return values_.begin(); }
+ const_iterator end() const { return values_.end(); }
+
+ void clear() {
+ values_.clear();
+ }
+
+ void swap(SparseVector<T>& other) {
+ values_.swap(other.values_);
+ }
+
+private:
+ MapType values_;
+};
+
+//like a pair but can live in a union, because it lacks default+copy ctors, dtor.
+template <class T>
+struct feature_val {
+ int fid;
+ T val;
+};
+
+template <class T>
+inline feature_val<T> featval(int fid,T const &val) {
+ feature_val<T> f;
+ f.fid=fid;
+ f.val=val;
+ return f;
+}
+
+
+// doesn't support fast indexing directly
+template <class T>
+class SparseVectorList {
+ typedef feature_val<T> Pair;
+ typedef SmallVector<Pair,1> List;
+ typedef typename List::const_iterator const_iterator;
+public:
+ SparseVectorList() { }
+ template <class I>
+ SparseVectorList(I i,I const& end) {
+ int c=0;
+ for (;i<end;++i,++c) {
+ if (*i)
+ p.push_back(featval(c,*i));
+ }
+ p.compact();
+ }
+ explicit SparseVectorList(std::vector<T> const& v) {
+ for (unsigned i=0;i<v.size();++i) {
+ T const& t=v[i];
+ if (t)
+ p.push_back(featval(i,t));
+ }
+ p.compact();
+ }
+ // unlike SparseVector, this doesn't overwrite - but conversion to SparseVector will use last value, which is the same
+ void set_value(int i,T const& val) {
+ p.push_back(featval(i,val)); // feature_val has no 2-arg ctor (kept POD for the union), so build it via featval
+ }
+ void overlay(SparseVector<T> *to) const {
+ for (int i=0;i<p.size();++i)
+ to->set_value(p[i].fid,p[i].val);
+ }
+ void copy_to(SparseVector<T> *to) const {
+ to->clear();
+ overlay(to);
+ }
+ SparseVector<T> sparse() const {
+ SparseVector<T> r;
+ copy_to(&r);
+ return r;
+ }
+private:
+ List p;
+};
+
+template <typename T>
+SparseVector<T> operator+(const SparseVector<T>& a, const SparseVector<T>& b) {
+ SparseVector<T> result = a;
+ return result += b;
+}
+
+template <typename T>
+SparseVector<T> operator*(const SparseVector<T>& a, const double& b) {
+ SparseVector<T> result = a;
+ return result *= b;
+}
+
+template <typename T>
+SparseVector<T> operator*(const SparseVector<T>& a, const T& b) {
+ SparseVector<T> result = a;
+ return result *= b;
+}
+
+template <typename T>
+SparseVector<T> operator*(const double& a, const SparseVector<T>& b) {
+ SparseVector<T> result = b;
+ return result *= a;
+}
+
+template <typename T>
+std::ostream &operator<<(std::ostream &out, const SparseVector<T> &vec)
+{
+ return vec.operator<<(out);
+}
+
+namespace B64 {
+ void Encode(double objective, const SparseVector<double>& v, std::ostream* out);
+ // returns false if failed to decode
+ bool Decode(double* objective, SparseVector<double>* v, const char* data, size_t size);
+}
+
+#endif
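
A short usage sketch for SparseVector (not part of the patch; assumes the headers it includes, e.g. fdict.h and small_vector.h, are on the include path). With SPARSE_VECTOR_HASH left undefined it is simply a std::map from feature id to value, with the arithmetic and dot-product helpers shown above:

// usage sketch, not part of the commit
#include <iostream>
#include "sparse_vector.h"

int main() {
  SparseVector<double> w, f;
  w.set_value(3, 0.5);            // weights
  w.set_value(7, -1.0);
  f.add_value(3, 2.0);            // feature values
  f.add_value(9, 4.0);            // id 9 has no weight, so it contributes nothing to the dot product
  std::cout << w.dot(f) << "\n";  // 0.5*2.0 -> prints 1
  f += w;                         // elementwise sum over the union of keys
  std::cout << f.get(7) << "\n";  // prints -1
  return 0;
}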
diff --git a/utils/hash.h b/utils/hash.h
index 3a60a429..5d7f2010 100755
--- a/utils/hash.h
+++ b/utils/hash.h
@@ -51,4 +51,10 @@ struct murmur_hash_array
}
};
+// adds default val to table if key wasn't found, returns ref to val
+template <class H,class K>
+typename H::data_type & get_default(H &ht,K const& k,typename H::data_type const& v) {
+ return const_cast<typename H::data_type &>(ht.insert(typename H::value_type(k,v)).first->second);
+}
+
#endif
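
get_default is the insert-if-absent idiom that CFG::Binarize relies on: one lookup either finds the stored value or installs the supplied default, and the caller can tell which happened by comparing the returned reference against the default (which works when the default is a fresh value such as the next unused id). The same pattern spelled out on a plain std::map (illustration only; std::map names the mapped type mapped_type rather than data_type, so the template above targets the SGI-style hash maps):

// the insert-if-absent pattern behind get_default, shown on std::map
#include <map>
#include <string>
#include <iostream>

int main() {
  std::map<std::string,int> ids;
  int next_id = 0;
  const char* words[] = {"the","cat","the"};
  for (int i = 0; i < 3; ++i) {
    // insert() leaves an existing entry untouched; .first->second is the stored value either way
    int &id = ids.insert(std::make_pair(std::string(words[i]), next_id)).first->second;
    if (id == next_id) ++next_id;  // the default was installed, i.e. the key was new
    std::cout << words[i] << " -> " << id << "\n";
  }
  return 0;  // prints the -> 0, cat -> 1, the -> 0
}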
diff --git a/utils/small_vector.h b/utils/small_vector.h
index 25c52359..ae1e5727 100644
--- a/utils/small_vector.h
+++ b/utils/small_vector.h
@@ -14,6 +14,7 @@
#include <stdint.h>
#include <new>
#include <stdint.h>
+#include "swap_pod.h"
//sizeof(T)/sizeof(T*)>1?sizeof(T)/sizeof(T*):1
template <class T,int SV_MAX=2>
@@ -245,6 +246,10 @@ public:
return !(a==b);
}
+ void swap(Self& o) {
+ swap_pod(*this,o);
+ }
+
private:
union StorageType {
T vals[SV_MAX];
@@ -255,6 +260,11 @@ public:
uint16_t capacity_; // only defined when size_ > __SV_MAX_STATIC
};
+template <class T,int SV_MAX>
+inline void swap(SmallVector<T,SV_MAX> &a,SmallVector<T,SV_MAX> &b) {
+ a.swap(b);
+}
+
typedef SmallVector<int,2> SmallVectorInt;
template <class T,int N>
diff --git a/utils/swap_pod.h b/utils/swap_pod.h
new file mode 100755
index 00000000..bb9a830d
--- /dev/null
+++ b/utils/swap_pod.h
@@ -0,0 +1,23 @@
+#ifndef SWAP_POD_H
+#define SWAP_POD_H
+
+//for swapping objects of the same concrete type where just swapping their bytes will work. will at least work on plain old data.
+
+#include <algorithm> // not used, but people who use this will want to bring std::swap in anyway
+#include <cstring>
+
+template <class T>
+inline void swap_pod(T &a,T &b) {
+ using namespace std;
+ const unsigned s=sizeof(T);
+ char tmp[s];
+ void *pt=(void*)tmp;
+ void *pa=(void*)&a;
+ void *pb=(void*)&b;
+ memcpy(pt,pa,s);
+ memcpy(pa,pb,s);
+ memcpy(pb,pt,s);
+}
+
+
+#endif
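
swap_pod shuffles the raw bytes of the two objects through a stack buffer, so it is only safe for types whose state lives entirely in their bytes (no pointers back into themselves, no external registration); SmallVector qualifies because its union holds either inline values or an owning heap pointer. A usage sketch, not part of the patch, assuming the two utils headers are on the include path:

// usage sketch for swap_pod and the SmallVector swap added above
#include <cassert>
#include "swap_pod.h"
#include "small_vector.h"

int main() {
  // plain PODs: three memcpys through a temporary buffer
  int a = 1, b = 2;
  swap_pod(a, b);
  assert(a == 2 && b == 1);

  // SmallVectorInt opts in via its swap() member, exposed through the free swap() overload
  SmallVectorInt u(3, 7), v;  // u is heap-backed (3 > SV_MAX), v is empty/inline
  swap(u, v);                 // O(1): ownership of u's heap buffer moves to v
  assert(u.empty() && v.size() == 3 && v[0] == 7);
  return 0;
}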