-rw-r--r--   configure.ac                     2
-rw-r--r--   dtrain/dtrain.cc                 4
-rw-r--r--   rst_parser/mst_train.cc          2
-rw-r--r--   rst_parser/rst_train.cc          4
-rw-r--r--   training/liblbfgs/Makefile.am    2
-rw-r--r--   training/model1.cc              61
-rw-r--r--   training/mpi_flex_optimize.cc    2
-rw-r--r--   training/ttables.h              17
-rw-r--r--   utils/ccrp_onetable.h            2
-rw-r--r--   utils/corpus_tools.cc           20
-rw-r--r--   utils/corpus_tools.h             4
-rw-r--r--   utils/dict.h                     3
-rw-r--r--   utils/fast_sparse_vector.h     108
-rw-r--r--   utils/hash.h                     7
-rw-r--r--   utils/sampler.h                 16
15 files changed, 192 insertions, 62 deletions
diff --git a/configure.ac b/configure.ac
index 0635e8dc..19498794 100644
--- a/configure.ac
+++ b/configure.ac
@@ -130,6 +130,6 @@ then
   AM_CONDITIONAL([GLC], true)
 fi
 
-CPPFLAGS="$CPPFLAGS -DHAVE_CONFIG_H"
+CPPFLAGS="-fPIC $CPPFLAGS -DHAVE_CONFIG_H"
 
 AC_OUTPUT(Makefile rst_parser/Makefile utils/Makefile mteval/Makefile extools/Makefile decoder/Makefile phrasinator/Makefile training/Makefile training/liblbfgs/Makefile dpmert/Makefile pro-train/Makefile rampion/Makefile klm/util/Makefile klm/lm/Makefile mira/Makefile dtrain/Makefile gi/pyp-topics/src/Makefile gi/clda/src/Makefile gi/pf/Makefile gi/markov_al/Makefile)
diff --git a/dtrain/dtrain.cc b/dtrain/dtrain.cc
index eea58393..b3e62914 100644
--- a/dtrain/dtrain.cc
+++ b/dtrain/dtrain.cc
@@ -616,12 +616,12 @@ main(int argc, char** argv)
     o.precision(17);
     o << _np;
     if (average) {
-      for (SparseVector<weight_t>::const_iterator it = w_average.begin(); it != w_average.end(); ++it) {
+      for (SparseVector<weight_t>::iterator it = w_average.begin(); it != w_average.end(); ++it) {
         if (it->second == 0) continue;
         o << FD::Convert(it->first) << '\t' << it->second << endl;
       }
     } else {
-      for (SparseVector<weight_t>::const_iterator it = lambdas.begin(); it != lambdas.end(); ++it) {
+      for (SparseVector<weight_t>::iterator it = lambdas.begin(); it != lambdas.end(); ++it) {
         if (it->second == 0) continue;
         o << FD::Convert(it->first) << '\t' << it->second << endl;
       }
diff --git a/rst_parser/mst_train.cc b/rst_parser/mst_train.cc
index 6332693e..a78df600 100644
--- a/rst_parser/mst_train.cc
+++ b/rst_parser/mst_train.cc
@@ -176,7 +176,7 @@ int main(int argc, char** argv) {
   for (int iter = 0; iter < iterations; ++iter) {
     cerr << "ITERATION " << iter << " " << flush;
     fill(g.begin(), g.end(), 0.0);
-    for (SparseVector<double>::const_iterator it = empirical.begin(); it != empirical.end(); ++it)
+    for (SparseVector<double>::iterator it = empirical.begin(); it != empirical.end(); ++it)
       g[it->first] = -it->second;
     double obj = -empirical.dot(weights);
     vector<boost::shared_ptr<GradientWorker> > jobs;
diff --git a/rst_parser/rst_train.cc b/rst_parser/rst_train.cc
index 9b730f3d..a8b8dd84 100644
--- a/rst_parser/rst_train.cc
+++ b/rst_parser/rst_train.cc
@@ -126,12 +126,12 @@ int main(int argc, char** argv) {
       u.logeq(tot_feats.dot(weights));
       prob_t w = u / q;
       zhat += w;
-      for (SparseVector<double>::const_iterator it = tot_feats.begin(); it != tot_feats.end(); ++it)
+      for (SparseVector<double>::iterator it = tot_feats.begin(); it != tot_feats.end(); ++it)
         sampled_exp.add_value(it->first, w * prob_t(it->second));
     }
     sampled_exp /= zhat;
     SparseVector<double> tot_m;
-    for (SparseVector<prob_t>::const_iterator it = sampled_exp.begin(); it != sampled_exp.end(); ++it)
+    for (SparseVector<prob_t>::iterator it = sampled_exp.begin(); it != sampled_exp.end(); ++it)
       tot_m.add_value(it->first, it->second.as_float());
     //cerr << "DIFF: " << (tot_m - corpus[i].features) << endl;
     const double eta = 0.03;
diff --git a/training/liblbfgs/Makefile.am b/training/liblbfgs/Makefile.am
index 9327c47f..64a3794d 100644
--- a/training/liblbfgs/Makefile.am
+++ b/training/liblbfgs/Makefile.am
@@ -1,6 +1,8 @@
 TESTS = ll_test
 noinst_PROGRAMS = ll_test
+
 ll_test_SOURCES = ll_test.cc
+ll_test_LDADD = liblbfgs.a -lz
 
 noinst_LIBRARIES = liblbfgs.a
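The const_iterator-to-iterator swaps in dtrain.cc, mst_train.cc, and rst_train.cc above (and in mpi_flex_optimize.cc below) go with the FastSparseVector change later in this commit: the class gains a non-const iterator, begin()/end() on a non-const vector now return that type, and nothing converts it to const_iterator. A minimal sketch of the resulting overload-resolution constraint, using toy types rather than cdec's:

// Toy illustration (assumption: like the new FastSparseVector::iterator,
// `iterator` here has no conversion to `const_iterator`).
struct Vec {
  struct iterator {};        // returned by begin() on a non-const Vec
  struct const_iterator {};  // returned by begin() on a const Vec
  iterator begin() { return iterator(); }
  const_iterator begin() const { return const_iterator(); }
};

int main() {
  Vec v;                         // non-const, like dtrain's w_average/lambdas
  Vec::iterator ok = v.begin();  // compiles: non-const overload selected
  // Vec::const_iterator bad = v.begin();  // would not compile: no conversion
  (void)ok;
  return 0;
}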
#include "stringlib.h" #include "filelib.h" #include "ttables.h" @@ -19,6 +19,7 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { opts.add_options() ("iterations,i",po::value<unsigned>()->default_value(5),"Number of iterations of EM training") ("beam_threshold,t",po::value<double>()->default_value(-4),"log_10 of beam threshold (-10000 to include everything, 0 max)") + ("bidir,b", "Run bidirectional alignment") ("no_null_word,N","Do not generate from the null token") ("write_alignments,A", "Write alignments instead of parameters") ("favor_diagonal,d", "Use a static alignment distribution that assigns higher probabilities to alignments near the diagonal") @@ -51,6 +52,15 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { return true; } +// src and trg are source and target strings, respectively (not really lattices) +double PosteriorInference(const vector<WordID>& src, const vector<WordID>& trg) { + double llh = 0; + static vector<double> unnormed_a_i; + if (src.size() > unnormed_a_i.size()) + unnormed_a_i.resize(src.size()); + return llh; +} + int main(int argc, char** argv) { po::variables_map conf; if (!InitCommandLine(argc, argv, &conf)) return 1; @@ -74,8 +84,8 @@ int main(int argc, char** argv) { return 1; } - TTable tt; - TTable::Word2Word2Double was_viterbi; + TTable s2t, t2s; + TTable::Word2Word2Double s2t_viterbi; double tot_len_ratio = 0; double mean_srclen_multiplier = 0; vector<double> unnormed_a_i; @@ -96,14 +106,11 @@ int main(int argc, char** argv) { ++lc; if (lc % 1000 == 0) { cerr << '.'; flag = true; } if (lc %50000 == 0) { cerr << " [" << lc << "]\n" << flush; flag = false; } - ParseTranslatorInput(line, &ssrc, &strg); - Lattice src, trg; - LatticeTools::ConvertTextToLattice(ssrc, &src); - LatticeTools::ConvertTextToLattice(strg, &trg); + vector<WordID> src, trg; + CorpusTools::ReadLine(line, &src, &trg); if (src.size() == 0 || trg.size() == 0) { cerr << "Error: " << lc << "\n" << line << endl; - assert(src.size() > 0); - assert(trg.size() > 0); + return 1; } if (src.size() > unnormed_a_i.size()) unnormed_a_i.resize(src.size()); @@ -113,13 +120,13 @@ int main(int argc, char** argv) { vector<double> probs(src.size() + 1); bool first_al = true; // used for write_alignments for (int j = 0; j < trg.size(); ++j) { - const WordID& f_j = trg[j][0].label; + const WordID& f_j = trg[j]; double sum = 0; const double j_over_ts = double(j) / trg.size(); double prob_a_i = 1.0 / (src.size() + use_null); // uniform (model 1) if (use_null) { if (favor_diagonal) prob_a_i = prob_align_null; - probs[0] = tt.prob(kNULL, f_j) * prob_a_i; + probs[0] = s2t.prob(kNULL, f_j) * prob_a_i; sum += probs[0]; } double az = 0; @@ -133,7 +140,7 @@ int main(int argc, char** argv) { for (int i = 1; i <= src.size(); ++i) { if (favor_diagonal) prob_a_i = unnormed_a_i[i-1] / az; - probs[i] = tt.prob(src[i-1][0].label, f_j) * prob_a_i; + probs[i] = s2t.prob(src[i-1], f_j) * prob_a_i; sum += probs[i]; } if (final_iteration) { @@ -150,7 +157,7 @@ int main(int argc, char** argv) { if (probs[i] > max_p) { max_index = i; max_p = probs[i]; - max_i = src[i-1][0].label; + max_i = src[i-1]; } } if (write_alignments) { @@ -159,13 +166,13 @@ int main(int argc, char** argv) { cout << (max_index - 1) << "-" << j; } } - was_viterbi[max_i][f_j] = 1.0; + s2t_viterbi[max_i][f_j] = 1.0; } } else { if (use_null) - tt.Increment(kNULL, f_j, probs[0] / sum); + s2t.Increment(kNULL, f_j, probs[0] / sum); for (int i = 1; i <= src.size(); ++i) - tt.Increment(src[i-1][0].label, f_j, probs[i] 
diff --git a/training/mpi_flex_optimize.cc b/training/mpi_flex_optimize.cc
index a9ead018..b52decdc 100644
--- a/training/mpi_flex_optimize.cc
+++ b/training/mpi_flex_optimize.cc
@@ -356,7 +356,7 @@ int main(int argc, char** argv) {
         gg.clear();
         gg.resize(FD::NumFeats());
         if (gg.size() != cur_weights.size()) { cur_weights.resize(gg.size()); }
-        for (SparseVector<double>::const_iterator it = g.begin(); it != g.end(); ++it)
+        for (SparseVector<double>::iterator it = g.begin(); it != g.end(); ++it)
           if (it->first) { gg[it->first] = it->second; }
         g.clear();
         double r = ApplyRegularizationTerms(regularization_strength,
diff --git a/training/ttables.h b/training/ttables.h
index bf3351d2..9baa13ca 100644
--- a/training/ttables.h
+++ b/training/ttables.h
@@ -4,6 +4,7 @@
 #include <iostream>
 #include <tr1/unordered_map>
 
+#include "sparse_vector.h"
 #include "m.h"
 #include "wordid.h"
 #include "tdict.h"
@@ -68,18 +69,18 @@ class TTable {
     }
     return *this;
   }
-  void ShowTTable() {
-    for (Word2Word2Double::iterator it = ttable.begin(); it != ttable.end(); ++it) {
-      Word2Double& cpd = it->second;
-      for (Word2Double::iterator j = cpd.begin(); j != cpd.end(); ++j) {
+  void ShowTTable() const {
+    for (Word2Word2Double::const_iterator it = ttable.begin(); it != ttable.end(); ++it) {
+      const Word2Double& cpd = it->second;
+      for (Word2Double::const_iterator j = cpd.begin(); j != cpd.end(); ++j) {
         std::cerr << "P(" << TD::Convert(j->first) << '|' << TD::Convert(it->first) << ") = " << j->second << std::endl;
       }
     }
   }
-  void ShowCounts() {
-    for (Word2Word2Double::iterator it = counts.begin(); it != counts.end(); ++it) {
-      Word2Double& cpd = it->second;
-      for (Word2Double::iterator j = cpd.begin(); j != cpd.end(); ++j) {
+  void ShowCounts() const {
+    for (Word2Word2Double::const_iterator it = counts.begin(); it != counts.end(); ++it) {
+      const Word2Double& cpd = it->second;
+      for (Word2Double::const_iterator j = cpd.begin(); j != cpd.end(); ++j) {
         std::cerr << "c(" << TD::Convert(j->first) << '|' << TD::Convert(it->first) << ") = " << j->second << std::endl;
       }
     }
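The ttables.h hunk is a pure const-correctness fix: once ShowTTable() and ShowCounts() are marked const, their loop variables must become const_iterators and their local references const. The same pattern in miniature, with an illustrative type rather than TTable itself:

// Toy version of the fix: a const member function can only obtain
// const_iterators and const references from its members, and in exchange
// it becomes callable on const objects and const references.
#include <iostream>
#include <map>

struct Counts {
  std::map<int, double> c_;
  void Show() const {  // const: now usable on a const Counts&
    for (std::map<int, double>::const_iterator it = c_.begin(); it != c_.end(); ++it)
      std::cerr << it->first << " -> " << it->second << '\n';
  }
};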
diff --git a/utils/ccrp_onetable.h b/utils/ccrp_onetable.h
index 1fe01b0e..abe399ea 100644
--- a/utils/ccrp_onetable.h
+++ b/utils/ccrp_onetable.h
@@ -183,7 +183,7 @@ class CCRP_OneTable {
     assert(has_discount_prior() || has_alpha_prior());
     DiscountResampler dr(*this);
     ConcentrationResampler cr(*this);
-    for (int iter = 0; iter < nloop; ++iter) {
+    for (unsigned iter = 0; iter < nloop; ++iter) {
       if (has_alpha_prior()) {
         alpha_ = slice_sampler1d(cr, alpha_, *rng, 0.0,
                                  std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations);
diff --git a/utils/corpus_tools.cc b/utils/corpus_tools.cc
index d17785af..191153a2 100644
--- a/utils/corpus_tools.cc
+++ b/utils/corpus_tools.cc
@@ -8,6 +8,26 @@
 
 using namespace std;
 
+void CorpusTools::ReadLine(const string& line,
+                           vector<WordID>* src,
+                           vector<WordID>* trg) {
+  static const WordID kDIV = TD::Convert("|||");
+  static vector<WordID> tmp;
+  src->clear();
+  trg->clear();
+  TD::ConvertSentence(line, &tmp);
+  unsigned i = 0;
+  while(i < tmp.size() && tmp[i] != kDIV) {
+    src->push_back(tmp[i]);
+    ++i;
+  }
+  if (i < tmp.size() && tmp[i] == kDIV) {
+    ++i;
+    for (; i < tmp.size() ; ++i)
+      trg->push_back(tmp[i]);
+  }
+}
+
 void CorpusTools::ReadFromFile(const string& filename,
                                vector<vector<WordID> >* src,
                                set<WordID>* src_vocab,
diff --git a/utils/corpus_tools.h b/utils/corpus_tools.h
index 97bdaa94..f6699d87 100644
--- a/utils/corpus_tools.h
+++ b/utils/corpus_tools.h
@@ -7,6 +7,10 @@
 #include "wordid.h"
 
 struct CorpusTools {
+  static void ReadLine(const std::string& line,
+                       std::vector<WordID>* src,
+                       std::vector<WordID>* trg);
+
   static void ReadFromFile(const std::string& filename,
                            std::vector<std::vector<WordID> >* src,
                            std::set<WordID>* src_vocab = NULL,
diff --git a/utils/dict.h b/utils/dict.h
index 75ea3def..f08d0cf4 100644
--- a/utils/dict.h
+++ b/utils/dict.h
@@ -12,7 +12,8 @@
 
 class Dict {
  typedef
-   HASH_MAP<std::string, WordID, boost::hash<std::string> > Map;
+   //HASH_MAP<std::string, WordID, boost::hash<std::string> > Map;
+   HASH_MAP<std::string, WordID> Map;
  public:
   Dict() : b0_("<bad0>") {
     HASH_MAP_EMPTY(d_,"<bad1>");
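The new CorpusTools::ReadLine splits one "src ||| trg" corpus line into two token vectors: everything before the first "|||" is source, everything after it is target, and a line with no delimiter becomes source-only. A standalone analogue over strings (the real function first maps tokens to WordIDs via TD::ConvertSentence):

// Standalone analogue of CorpusTools::ReadLine, operating on raw strings
// instead of cdec WordIDs.
#include <cstddef>
#include <sstream>
#include <string>
#include <vector>

void ReadLine(const std::string& line,
              std::vector<std::string>* src,
              std::vector<std::string>* trg) {
  src->clear();
  trg->clear();
  std::istringstream in(line);
  std::string tok;
  std::vector<std::string> tmp;
  while (in >> tok) tmp.push_back(tok);
  std::size_t i = 0;
  while (i < tmp.size() && tmp[i] != "|||") { src->push_back(tmp[i]); ++i; }
  if (i < tmp.size()) {  // saw the delimiter: everything after it is target
    for (++i; i < tmp.size(); ++i) trg->push_back(tmp[i]);
  }
}

For example, ReadLine("el gato ||| the cat", &s, &t) yields s = {el, gato} and t = {the, cat}, matching the WordID version above.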
diff --git a/utils/fast_sparse_vector.h b/utils/fast_sparse_vector.h
index e86cbdc1..433a5cc5 100644
--- a/utils/fast_sparse_vector.h
+++ b/utils/fast_sparse_vector.h
@@ -66,6 +66,60 @@ BOOST_STATIC_ASSERT(sizeof(PairIntT<float>) == sizeof(std::pair<unsigned,float>))
 
 template <typename T, unsigned LOCAL_MAX = (sizeof(T) == sizeof(float) ? 15u : 7u)>
 class FastSparseVector {
  public:
+  struct iterator {
+    iterator(FastSparseVector<T>& v, const bool is_end) : local_(!v.is_remote_) {
+      if (local_) {
+        local_it_ = &v.data_.local[is_end ? v.local_size_ : 0];
+      } else {
+        if (is_end)
+          remote_it_ = v.data_.rbmap->end();
+        else
+          remote_it_ = v.data_.rbmap->begin();
+      }
+    }
+    iterator(FastSparseVector<T>& v, const bool, const unsigned k) : local_(!v.is_remote_) {
+      if (local_) {
+        unsigned i = 0;
+        while(i < v.local_size_ && v.data_.local[i].first() != k) { ++i; }
+        local_it_ = &v.data_.local[i];
+      } else {
+        remote_it_ = v.data_.rbmap->find(k);
+      }
+    }
+    const bool local_;
+    PairIntT<T>* local_it_;
+    typename SPARSE_HASH_MAP<unsigned, T>::iterator remote_it_;
+    std::pair<const unsigned, T>& operator*() const {
+      if (local_)
+        return *reinterpret_cast<std::pair<const unsigned, T>*>(local_it_);
+      else
+        return *remote_it_;
+    }
+
+    std::pair<const unsigned, T>* operator->() const {
+      if (local_)
+        return reinterpret_cast<std::pair<const unsigned, T>*>(local_it_);
+      else
+        return &*remote_it_;
+    }
+
+    iterator& operator++() {
+      if (local_) ++local_it_; else ++remote_it_;
+      return *this;
+    }
+
+    inline bool operator==(const iterator& o) const {
+      if (o.local_ != local_) return false;
+      if (local_) {
+        return local_it_ == o.local_it_;
+      } else {
+        return remote_it_ == o.remote_it_;
+      }
+    }
+    inline bool operator!=(const iterator& o) const {
+      return !(o == *this);
+    }
+  };
   struct const_iterator {
     const_iterator(const FastSparseVector<T>& v, const bool is_end) : local_(!v.is_remote_) {
       if (local_) {
@@ -77,12 +131,21 @@ class FastSparseVector {
           remote_it_ = v.data_.rbmap->begin();
       }
     }
+    const_iterator(const FastSparseVector<T>& v, const bool, const unsigned k) : local_(!v.is_remote_) {
+      if (local_) {
+        unsigned i = 0;
+        while(i < v.local_size_ && v.data_.local[i].first() != k) { ++i; }
+        local_it_ = &v.data_.local[i];
+      } else {
+        remote_it_ = v.data_.rbmap->find(k);
+      }
+    }
     const bool local_;
     const PairIntT<T>* local_it_;
-    typename std::map<unsigned, T>::const_iterator remote_it_;
+    typename SPARSE_HASH_MAP<unsigned, T>::const_iterator remote_it_;
     const std::pair<const unsigned, T>& operator*() const {
       if (local_)
-        return *reinterpret_cast<const std::pair<const unsigned, float>*>(local_it_);
+        return *reinterpret_cast<const std::pair<const unsigned, T>*>(local_it_);
       else
         return *remote_it_;
     }
@@ -118,7 +181,7 @@ class FastSparseVector {
   }
   FastSparseVector(const FastSparseVector& other) {
     std::memcpy(this, &other, sizeof(FastSparseVector));
-    if (is_remote_) data_.rbmap = new std::map<unsigned, T>(*data_.rbmap);
+    if (is_remote_) data_.rbmap = new SPARSE_HASH_MAP<unsigned, T>(*data_.rbmap);
   }
   FastSparseVector(std::pair<unsigned, T>* first, std::pair<unsigned, T>* last) {
     const ptrdiff_t n = last - first;
@@ -128,7 +191,7 @@ class FastSparseVector {
       std::memcpy(data_.local, first, sizeof(std::pair<unsigned, T>) * n);
     } else {
       is_remote_ = true;
-      data_.rbmap = new std::map<unsigned, T>(first, last);
+      data_.rbmap = new SPARSE_HASH_MAP<unsigned, T>(first, last);
     }
   }
   void erase(int k) {
@@ -150,7 +213,7 @@ class FastSparseVector {
     clear();
     std::memcpy(this, &other, sizeof(FastSparseVector));
     if (is_remote_)
-      data_.rbmap = new std::map<unsigned, T>(*data_.rbmap);
+      data_.rbmap = new SPARSE_HASH_MAP<unsigned, T>(*data_.rbmap);
     return *this;
   }
   T const& get_singleton() const {
@@ -160,6 +223,9 @@ class FastSparseVector {
   bool nonzero(unsigned k) const {
     return static_cast<bool>(value(k));
   }
+  inline T& operator[](unsigned k) {
+    return get_or_create_bin(k);
+  }
   inline void set_value(unsigned k, const T& v) {
     get_or_create_bin(k) = v;
   }
@@ -171,7 +237,7 @@ class FastSparseVector {
   }
   inline T value(unsigned k) const {
     if (is_remote_) {
-      typename std::map<unsigned, T>::const_iterator it = data_.rbmap->find(k);
+      typename SPARSE_HASH_MAP<unsigned, T>::const_iterator it = data_.rbmap->find(k);
       if (it != data_.rbmap->end()) return it->second;
     } else {
       for (unsigned i = 0; i < local_size_; ++i) {
@@ -256,8 +322,8 @@ class FastSparseVector {
   }
   inline FastSparseVector& operator*=(const T& scalar) {
     if (is_remote_) {
-      const typename std::map<unsigned, T>::iterator end = data_.rbmap->end();
-      for (typename std::map<unsigned, T>::iterator it = data_.rbmap->begin(); it != end; ++it)
+      const typename SPARSE_HASH_MAP<unsigned, T>::iterator end = data_.rbmap->end();
+      for (typename SPARSE_HASH_MAP<unsigned, T>::iterator it = data_.rbmap->begin(); it != end; ++it)
         it->second *= scalar;
     } else {
       for (int i = 0; i < local_size_; ++i)
@@ -267,8 +333,8 @@ class FastSparseVector {
   }
   inline FastSparseVector& operator/=(const T& scalar) {
     if (is_remote_) {
-      const typename std::map<unsigned, T>::iterator end = data_.rbmap->end();
-      for (typename std::map<unsigned, T>::iterator it = data_.rbmap->begin(); it != end; ++it)
+      const typename SPARSE_HASH_MAP<unsigned, T>::iterator end = data_.rbmap->end();
+      for (typename SPARSE_HASH_MAP<unsigned, T>::iterator it = data_.rbmap->begin(); it != end; ++it)
         it->second /= scalar;
     } else {
       for (int i = 0; i < local_size_; ++i)
@@ -283,6 +349,18 @@ class FastSparseVector {
     }
     return o;
   }
+  iterator find(unsigned k) {
+    return iterator(*this, false, k);
+  }
+  iterator begin() {
+    return iterator(*this, false);
+  }
+  iterator end() {
+    return iterator(*this, true);
+  }
+  const_iterator find(unsigned k) const {
+    return const_iterator(*this, false, k);
+  }
   const_iterator begin() const {
     return const_iterator(*this, false);
   }
@@ -353,17 +431,19 @@ class FastSparseVector {
   void swap_local_rbmap() {
     if (is_remote_) { // data is in rbmap, move to local
       assert(data_.rbmap->size() < LOCAL_MAX);
-      const std::map<unsigned, T>* m = data_.rbmap;
+      const SPARSE_HASH_MAP<unsigned, T>* m = data_.rbmap;
       local_size_ = m->size();
       int i = 0;
-      for (typename std::map<unsigned, T>::const_iterator it = m->begin();
+      for (typename SPARSE_HASH_MAP<unsigned, T>::const_iterator it = m->begin();
            it != m->end(); ++it) {
         data_.local[i] = *it;
         ++i;
       }
       is_remote_ = false;
     } else { // data is local, move to rbmap
-      std::map<unsigned, T>* m = new std::map<unsigned, T>(&data_.local[0], &data_.local[local_size_]);
+      SPARSE_HASH_MAP<unsigned, T>* m = new SPARSE_HASH_MAP<unsigned, T>(
+          reinterpret_cast<std::pair<unsigned, T>*>(&data_.local[0]),
+          reinterpret_cast<std::pair<unsigned, T>*>(&data_.local[local_size_]), local_size_ * 1.5 + 1);
       data_.rbmap = m;
       is_remote_ = true;
     }
@@ -371,7 +451,7 @@ class FastSparseVector {
 
   union {
     PairIntT<T> local[LOCAL_MAX];
-    std::map<unsigned, T>* rbmap;
+    SPARSE_HASH_MAP<unsigned, T>* rbmap;
   } data_;
   unsigned char local_size_;
   bool is_remote_;
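The storage scheme this hunk ports from std::map to SPARSE_HASH_MAP is a small-buffer optimization: up to LOCAL_MAX entries live in an inline array inside a union, and larger vectors spill to a heap-allocated map reached through the same union. A simplified sketch of the idea (std::map stands in for the hash map; the real class also handles copying, erase, value lookup, and the reverse swap back to local storage):

// Simplified sketch of FastSparseVector's local/remote union storage.
// SmallSparseVector is illustrative, not cdec's class.
#include <map>

template <typename T, unsigned LOCAL_MAX = 7>
class SmallSparseVector {
 public:
  SmallSparseVector() : local_size_(0), is_remote_(false) {}
  ~SmallSparseVector() { if (is_remote_) delete data_.rbmap; }
  T& operator[](unsigned k) {
    if (is_remote_) return (*data_.rbmap)[k];
    for (unsigned i = 0; i < local_size_; ++i)   // linear scan: fine for tiny n
      if (data_.local[i].first == k) return data_.local[i].second;
    if (local_size_ < LOCAL_MAX) {               // new key still fits inline
      data_.local[local_size_].first = k;
      data_.local[local_size_].second = T();
      return data_.local[local_size_++].second;
    }
    // Inline buffer full: spill everything to the heap map.
    std::map<unsigned, T>* m = new std::map<unsigned, T>();
    for (unsigned i = 0; i < local_size_; ++i)
      (*m)[data_.local[i].first] = data_.local[i].second;
    data_.rbmap = m;
    is_remote_ = true;
    return (*m)[k];
  }
 private:
  struct Pair { unsigned first; T second; };     // POD, as the union requires
  union Data {
    Pair local[LOCAL_MAX];                       // inline storage
    std::map<unsigned, T>* rbmap;                // heap storage
  } data_;
  unsigned char local_size_;
  bool is_remote_;
};

Feature vectors in decoding are overwhelmingly short, so most of them never pay for a heap allocation at all; the switch from std::map to google::sparse_hash_map only changes what the rare long vectors spill into.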
diff --git a/utils/hash.h b/utils/hash.h
index 31457430..6d992086 100644
--- a/utils/hash.h
+++ b/utils/hash.h
@@ -10,8 +10,10 @@
 #endif
 
 #ifdef HAVE_SPARSEHASH
-# include <google/dense_hash_map>
-# include <google/dense_hash_set>
+# include <sparsehash/dense_hash_map>
+# include <sparsehash/dense_hash_set>
+# include <sparsehash/sparse_hash_map>
+# define SPARSE_HASH_MAP google::sparse_hash_map
 # define HASH_MAP google::dense_hash_map
 # define HASH_SET google::dense_hash_set
 # define HASH_MAP_RESERVED(h,empty,deleted) do { h.set_empty_key(empty); h.set_deleted_key(deleted); } while(0)
@@ -19,6 +21,7 @@
 #else
 # include <tr1/unordered_map>
 # include <tr1/unordered_set>
+# define SPARSE_HASH_MAP std::tr1::unordered_map
 # define HASH_MAP std::tr1::unordered_map
 # define HASH_SET std::tr1::unordered_set
 # define HASH_MAP_RESERVED(h,empty,deleted)
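hash.h exposes the new backend through the same macro convention as HASH_MAP/HASH_SET, so client code such as fast_sparse_vector.h names the type once and builds with or without sparsehash installed. Unlike dense_hash_map, google::sparse_hash_map needs no empty-key setup (only a deleted key before erasing), which is what lets it replace std::map without a HASH_MAP_RESERVED-style hook. A hypothetical client, assuming cdec's include paths:

// Hypothetical client of the SPARSE_HASH_MAP macro: the same code compiles
// against google::sparse_hash_map or std::tr1::unordered_map.
#include "hash.h"

typedef SPARSE_HASH_MAP<unsigned, float> FeatMap;

float Sum(const FeatMap& m) {
  float s = 0;
  for (FeatMap::const_iterator it = m.begin(); it != m.end(); ++it)
    s += it->second;
  return s;
}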
diff --git a/utils/sampler.h b/utils/sampler.h
index b237c716..3e4a4086 100644
--- a/utils/sampler.h
+++ b/utils/sampler.h
@@ -12,6 +12,7 @@
 #include <boost/random/mersenne_twister.hpp>
 #include <boost/random/uniform_real.hpp>
 #include <boost/random/variate_generator.hpp>
+#include <boost/random/gamma_distribution.hpp>
 #include <boost/random/normal_distribution.hpp>
 #include <boost/random/poisson_distribution.hpp>
 #include <boost/random/uniform_int.hpp>
@@ -76,6 +77,18 @@ struct RandomNumberGenerator {
     return boost::poisson_distribution<int>(lambda)(m_random);
   }
 
+  double NextGamma(double shape, double scale = 1.0) {
+    boost::gamma_distribution<> gamma(shape);
+    boost::variate_generator<boost::mt19937&,boost::gamma_distribution<> > vg(m_generator, gamma);
+    return vg() * scale;
+  }
+
+  double NextBeta(double alpha, double beta) {
+    double x = NextGamma(alpha);
+    double y = NextGamma(beta);
+    return x / (x + y);
+  }
+
   bool AcceptMetropolisHastings(const prob_t& p_cur,
                                 const prob_t& p_prev,
                                 const prob_t& q_cur,
@@ -123,11 +136,12 @@ size_t RandomNumberGenerator<RNG>::SelectSample(const SampleSet<F>& ss, double T
   const bool anneal = (T != 1.0);
   F sum = F(0);
   if (anneal) {
-    for (int i = 0; i < ss.m_scores.size(); ++i)
+    for (unsigned i = 0; i < ss.m_scores.size(); ++i)
       sum += pow(ss.m_scores[i], annealing_factor);  // p^(1/T)
   } else {
     sum = std::accumulate(ss.m_scores.begin(), ss.m_scores.end(), F(0));
   }
+  //std::cerr << "SUM: " << sum << std::endl;
   //for (size_t i = 0; i < ss.m_scores.size(); ++i) std::cerr << ss.m_scores[i] << ",";
   //std::cerr << std::endl;
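NextBeta uses the standard two-gamma construction: if X ~ Gamma(alpha, theta) and Y ~ Gamma(beta, theta) are independent, then X/(X+Y) ~ Beta(alpha, beta). The scale theta cancels in the ratio, which is why NextGamma can be called with its default scale of 1. A quick standalone check of the construction, using C++11 <random> as an analogue for the Boost generators above:

// Empirical check that X/(X+Y) with independent gammas is Beta-distributed:
// the sample mean should approach alpha/(alpha+beta).
#include <iostream>
#include <random>

int main() {
  std::mt19937 rng(42);
  std::gamma_distribution<double> ga(2.0, 1.0);  // shape alpha=2, scale 1
  std::gamma_distribution<double> gb(5.0, 1.0);  // shape beta=5, scale 1
  double mean = 0;
  const int N = 100000;
  for (int i = 0; i < N; ++i) {
    const double x = ga(rng), y = gb(rng);
    mean += x / (x + y);                         // one Beta(2,5) draw
  }
  // Expected mean: alpha/(alpha+beta) = 2/7, approximately 0.2857.
  std::cout << mean / N << std::endl;
  return 0;
}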