From f0bdd4de6455855d705d9056deb2e90c999dc740 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Fri, 20 Jan 2012 15:35:47 -0500 Subject: 'pseudo model 2' that strictly favors a diagonal, with tunable parameters for p(null) and how sharp/flat the alignment distribution is around the diagonal --- training/model1.cc | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) (limited to 'training') diff --git a/training/model1.cc b/training/model1.cc index b9590ece..346c0033 100644 --- a/training/model1.cc +++ b/training/model1.cc @@ -20,6 +20,10 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { ("iterations,i",po::value()->default_value(5),"Number of iterations of EM training") ("beam_threshold,t",po::value()->default_value(-4),"log_10 of beam threshold (-10000 to include everything, 0 max)") ("no_null_word,N","Do not generate from the null token") + ("write_alignments,A", "Write alignments instead of parameters") + ("favor_diagonal,d", "Use a static alignment distribution that assigns higher probabilities to alignments near the diagonal") + ("diagonal_tension,T", po::value()->default_value(4.0), "How sharp or flat around the diagonal is the alignment distribution (<1 = flat >1 = sharp)") + ("prob_align_null", po::value()->default_value(0.08), "When --favor_diagonal is set, what's the probability of a null alignment?") ("variational_bayes,v","Add a symmetric Dirichlet prior and infer VB estimate of weights") ("alpha,a", po::value()->default_value(0.01), "Hyperparameter for optional Dirichlet prior") ("no_add_viterbi,V","Do not add Viterbi alignment points (may generate a grammar where some training sentence pairs are unreachable)"); @@ -56,7 +60,12 @@ int main(int argc, char** argv) { const WordID kNULL = TD::Convert(""); const bool add_viterbi = (conf.count("no_add_viterbi") == 0); const bool variational_bayes = (conf.count("variational_bayes") > 0); + const bool write_alignments = (conf.count("write_alignments") > 0); + const double diagonal_tension = conf["diagonal_tension"].as(); + const double prob_align_null = conf["prob_align_null"].as(); + const double prob_align_not_null = 1.0 - prob_align_null; const double alpha = conf["alpha"].as(); + const bool favor_diagonal = conf.count("favor_diagonal"); if (variational_bayes && alpha <= 0.0) { cerr << "--alpha must be > 0\n"; return 1; @@ -93,31 +102,52 @@ int main(int argc, char** argv) { denom += trg.size(); vector probs(src.size() + 1); const double src_logprob = -log(src.size() + 1); + bool first_al = true; // used for write_alignments for (int j = 0; j < trg.size(); ++j) { const WordID& f_j = trg[j][0].label; double sum = 0; + const double j_over_ts = double(j) / trg.size(); + double prob_a_i = 1.0 / (src.size() + use_null); // uniform (model 1) if (use_null) { - probs[0] = tt.prob(kNULL, f_j); + if (favor_diagonal) prob_a_i = prob_align_null; + probs[0] = tt.prob(kNULL, f_j) * prob_a_i; sum += probs[0]; } + double az = 0; + if (favor_diagonal) { + for (int ta = 0; ta < src.size(); ++ta) + az += exp(-fabs(double(ta) / src.size() - j_over_ts) * diagonal_tension); + az /= prob_align_not_null; + } for (int i = 1; i <= src.size(); ++i) { - probs[i] = tt.prob(src[i-1][0].label, f_j); + if (favor_diagonal) + prob_a_i = exp(-fabs(double(i) / src.size() - j_over_ts) * diagonal_tension) / az; + probs[i] = tt.prob(src[i-1][0].label, f_j) * prob_a_i; sum += probs[i]; } if (final_iteration) { - if (add_viterbi) { + if (add_viterbi || write_alignments) { WordID max_i = 0; double max_p = -1; + 
int max_index = -1; if (use_null) { max_i = kNULL; + max_index = 0; max_p = probs[0]; } for (int i = 1; i <= src.size(); ++i) { if (probs[i] > max_p) { + max_index = i; max_p = probs[i]; max_i = src[i-1][0].label; } } + if (write_alignments) { + if (max_index > 0) { + if (first_al) first_al = false; else cout << ' '; + cout << (max_index - 1) << "-" << j; + } + } was_viterbi[max_i][f_j] = 1.0; } } else { @@ -128,6 +158,7 @@ int main(int argc, char** argv) { } likelihood += log(sum) + src_logprob; } + if (write_alignments && final_iteration) cout << endl; } // log(e) = 1.0 @@ -145,6 +176,8 @@ int main(int argc, char** argv) { tt.Normalize(); } } + if (write_alignments) return 0; + for (TTable::Word2Word2Double::iterator ei = tt.ttable.begin(); ei != tt.ttable.end(); ++ei) { const TTable::Word2Double& cpd = ei->second; const TTable::Word2Double& vit = was_viterbi[ei->first]; -- cgit v1.2.3 From 4c2360119def2fb624d2691b355b1908c511f004 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Tue, 24 Jan 2012 22:26:44 -0500 Subject: more models --- gi/pf/align-lexonly.cc | 14 +++++++---- gi/pf/base_measures.cc | 2 +- gi/pf/base_measures.h | 27 ++++++++++++++++++++- training/model1.cc | 64 +++++++++++++++++++++++++++++++++++++++++++++++--- 4 files changed, 98 insertions(+), 9 deletions(-) (limited to 'training') diff --git a/gi/pf/align-lexonly.cc b/gi/pf/align-lexonly.cc index e9f1e7b6..76e2e009 100644 --- a/gi/pf/align-lexonly.cc +++ b/gi/pf/align-lexonly.cc @@ -122,10 +122,11 @@ struct BasicLexicalAlignment { vector* corp) : letters(lets), corpus(*corp), + up0("fr-en.10k.translit-base.txt.gz"), //up0(words_e), //up0("en.chars.1gram", letters_e), //up0("en.words.1gram"), - up0(letters_e), + //up0(letters_e), //up0("en.chars.2gram"), tmodel(up0) { } @@ -180,14 +181,18 @@ struct BasicLexicalAlignment { //PhraseConditionalUninformativeUnigramBase up0; //UnigramWordBase up0; //HierarchicalUnigramBase up0; - HierarchicalWordBase up0; + TableLookupBase up0; + //HierarchicalWordBase up0; + //PoissonUniformUninformativeBase up0; //CompletelyUniformBase up0; //FixedNgramBase up0; //ConditionalTranslationModel tmodel; //ConditionalTranslationModel tmodel; //ConditionalTranslationModel tmodel; //ConditionalTranslationModel tmodel; - ConditionalTranslationModel tmodel; + //ConditionalTranslationModel tmodel; + //ConditionalTranslationModel tmodel; + ConditionalTranslationModel tmodel; //ConditionalTranslationModel tmodel; //ConditionalTranslationModel tmodel; }; @@ -222,6 +227,7 @@ void BasicLexicalAlignment::ResampleCorpus() { void ExtractLetters(const set& v, vector >* l, set* letset = NULL) { for (set::const_iterator it = v.begin(); it != v.end(); ++it) { + if (*it >= l->size()) { l->resize(*it + 1); } vector& letters = (*l)[*it]; if (letters.size()) continue; // if e and f have the same word @@ -308,7 +314,7 @@ int main(int argc, char** argv) { x.InitializeRandom(); const unsigned samples = conf["samples"].as(); for (int i = 0; i < samples; ++i) { - for (int j = 4995; j < 4997; ++j) Debug(corpus[j]); + for (int j = 395; j < 397; ++j) Debug(corpus[j]); cerr << i << "\t" << x.tmodel.r.size() << "\t"; if (i % 10 == 0) x.ResampleHyperparemeters(); x.ResampleCorpus(); diff --git a/gi/pf/base_measures.cc b/gi/pf/base_measures.cc index 7894d3e7..4b1863fa 100644 --- a/gi/pf/base_measures.cc +++ b/gi/pf/base_measures.cc @@ -37,7 +37,7 @@ TableLookupBase::TableLookupBase(const string& fname) { } else if (cc == 1) { x.e_.push_back(cur); } else if (cc == 2) { - table[x] = atof(TD::Convert(cur)); + 
table[x].logeq(atof(TD::Convert(cur))); ++cc; } else { if (flag) cerr << endl; diff --git a/gi/pf/base_measures.h b/gi/pf/base_measures.h index 7214aa22..b0495bfd 100644 --- a/gi/pf/base_measures.h +++ b/gi/pf/base_measures.h @@ -51,6 +51,22 @@ struct Model1 { std::vector > ttable; }; +struct PoissonUniformUninformativeBase { + explicit PoissonUniformUninformativeBase(const unsigned ves) : kUNIFORM(1.0 / ves) {} + prob_t operator()(const TRule& r) const { + prob_t p; p.logeq(log_poisson(r.e_.size(), 1.0)); + prob_t q = kUNIFORM; q.poweq(r.e_.size()); + p *= q; + return p; + } + void Summary() const {} + void ResampleHyperparameters(MT19937*) {} + void Increment(const TRule&) {} + void Decrement(const TRule&) {} + prob_t Likelihood() const { return prob_t::One(); } + const prob_t kUNIFORM; +}; + struct CompletelyUniformBase { explicit CompletelyUniformBase(const unsigned ves) : kUNIFORM(1.0 / ves) {} prob_t operator()(const TRule&) const { @@ -83,10 +99,19 @@ struct TableLookupBase { prob_t operator()(const TRule& rule) const { const std::tr1::unordered_map::const_iterator it = table.find(rule); - assert(it != table.end()); + if (it == table.end()) { + std::cerr << rule << " not found\n"; + abort(); + } return it->second; } + void ResampleHyperparameters(MT19937*) {} + void Increment(const TRule&) {} + void Decrement(const TRule&) {} + prob_t Likelihood() const { return prob_t::One(); } + void Summary() const {} + std::tr1::unordered_map table; }; diff --git a/training/model1.cc b/training/model1.cc index 346c0033..40249aa3 100644 --- a/training/model1.cc +++ b/training/model1.cc @@ -14,6 +14,11 @@ namespace po = boost::program_options; using namespace std; +inline double log_poisson(unsigned x, const double& lambda) { + assert(lambda > 0.0); + return log(lambda) * x - lgamma(x + 1) - lambda; +} + bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { po::options_description opts("Configuration options"); opts.add_options() @@ -25,6 +30,7 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { ("diagonal_tension,T", po::value()->default_value(4.0), "How sharp or flat around the diagonal is the alignment distribution (<1 = flat >1 = sharp)") ("prob_align_null", po::value()->default_value(0.08), "When --favor_diagonal is set, what's the probability of a null alignment?") ("variational_bayes,v","Add a symmetric Dirichlet prior and infer VB estimate of weights") + ("testset,x", po::value(), "After training completes, compute the log likelihood of this set of sentence pairs under the learned model") ("alpha,a", po::value()->default_value(0.01), "Hyperparameter for optional Dirichlet prior") ("no_add_viterbi,V","Do not add Viterbi alignment points (may generate a grammar where some training sentence pairs are unreachable)"); po::options_description clo("Command line options"); @@ -63,6 +69,8 @@ int main(int argc, char** argv) { const bool write_alignments = (conf.count("write_alignments") > 0); const double diagonal_tension = conf["diagonal_tension"].as(); const double prob_align_null = conf["prob_align_null"].as(); + string testset; + if (conf.count("testset")) testset = conf["testset"].as(); const double prob_align_not_null = 1.0 - prob_align_null; const double alpha = conf["alpha"].as(); const bool favor_diagonal = conf.count("favor_diagonal"); @@ -73,6 +81,8 @@ int main(int argc, char** argv) { TTable tt; TTable::Word2Word2Double was_viterbi; + double tot_len_ratio = 0; + double mean_srclen_multiplier = 0; for (int iter = 0; iter < ITERATIONS; ++iter) { const 
bool final_iteration = (iter == (ITERATIONS - 1)); cerr << "ITERATION " << (iter + 1) << (final_iteration ? " (FINAL)" : "") << endl; @@ -83,13 +93,13 @@ int main(int argc, char** argv) { int lc = 0; bool flag = false; string line; + string ssrc, strg; while(true) { getline(in, line); if (!in) break; ++lc; if (lc % 1000 == 0) { cerr << '.'; flag = true; } if (lc %50000 == 0) { cerr << " [" << lc << "]\n" << flush; flag = false; } - string ssrc, strg; ParseTranslatorInput(line, &ssrc, &strg); Lattice src, trg; LatticeTools::ConvertTextToLattice(ssrc, &src); @@ -99,9 +109,10 @@ int main(int argc, char** argv) { assert(src.size() > 0); assert(trg.size() > 0); } + if (iter == 0) + tot_len_ratio += static_cast(trg.size()) / static_cast(src.size()); denom += trg.size(); vector probs(src.size() + 1); - const double src_logprob = -log(src.size() + 1); bool first_al = true; // used for write_alignments for (int j = 0; j < trg.size(); ++j) { const WordID& f_j = trg[j][0].label; @@ -156,7 +167,7 @@ int main(int argc, char** argv) { for (int i = 1; i <= src.size(); ++i) tt.Increment(src[i-1][0].label, f_j, probs[i] / sum); } - likelihood += log(sum) + src_logprob; + likelihood += log(sum); } if (write_alignments && final_iteration) cout << endl; } @@ -165,6 +176,10 @@ int main(int argc, char** argv) { double base2_likelihood = likelihood / log(2); if (flag) { cerr << endl; } + if (iter == 0) { + mean_srclen_multiplier = tot_len_ratio / lc; + cerr << "expected target length = source length * " << mean_srclen_multiplier << endl; + } cerr << " log_e likelihood: " << likelihood << endl; cerr << " log_2 likelihood: " << base2_likelihood << endl; cerr << " cross entropy: " << (-base2_likelihood / denom) << endl; @@ -176,6 +191,49 @@ int main(int argc, char** argv) { tt.Normalize(); } } + if (testset.size()) { + ReadFile rf(testset); + istream& in = *rf.stream(); + int lc = 0; + double tlp = 0; + string ssrc, strg, line; + while (getline(in, line)) { + ++lc; + ParseTranslatorInput(line, &ssrc, &strg); + Lattice src, trg; + LatticeTools::ConvertTextToLattice(ssrc, &src); + LatticeTools::ConvertTextToLattice(strg, &trg); + double log_prob = log_poisson(trg.size(), 0.05 + src.size() * mean_srclen_multiplier); + + // compute likelihood + for (int j = 0; j < trg.size(); ++j) { + const WordID& f_j = trg[j][0].label; + double sum = 0; + const double j_over_ts = double(j) / trg.size(); + double prob_a_i = 1.0 / (src.size() + use_null); // uniform (model 1) + if (use_null) { + if (favor_diagonal) prob_a_i = prob_align_null; + sum += tt.prob(kNULL, f_j) * prob_a_i; + } + double az = 0; + if (favor_diagonal) { + for (int ta = 0; ta < src.size(); ++ta) + az += exp(-fabs(double(ta) / src.size() - j_over_ts) * diagonal_tension); + az /= prob_align_not_null; + } + for (int i = 1; i <= src.size(); ++i) { + if (favor_diagonal) + prob_a_i = exp(-fabs(double(i) / src.size() - j_over_ts) * diagonal_tension) / az; + sum += tt.prob(src[i-1][0].label, f_j) * prob_a_i; + } + log_prob += log(sum); + } + tlp += log_prob; + cerr << ssrc << " ||| " << strg << " ||| " << log_prob << endl; + } + cerr << "TOTAL LOG PROB " << tlp << endl; + } + if (write_alignments) return 0; for (TTable::Word2Word2Double::iterator ei = tt.ttable.begin(); ei != tt.ttable.end(); ++ei) { -- cgit v1.2.3 From 21a4b6629fedae575583f0d1e34c97dba8de2511 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Fri, 10 Feb 2012 13:17:12 -0500 Subject: clean up alignment tools --- training/atools.cc | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 
'training') diff --git a/training/atools.cc b/training/atools.cc index 42579627..82e30c38 100644 --- a/training/atools.cc +++ b/training/atools.cc @@ -8,7 +8,6 @@ #include #include "filelib.h" -#include "aligner.h" #include "alignment_pharaoh.h" namespace po = boost::program_options; @@ -79,7 +78,7 @@ struct FMeasureCommand : public Command { struct DisplayCommand : public Command { string Name() const { return "display"; } bool RequiresTwoOperands() const { return false; } - void Apply(const Array2D& in, const Array2D¬_used, Array2D* x) { + void Apply(const Array2D& in, const Array2D&, Array2D* x) { *x = in; cout << *x << endl; } @@ -88,7 +87,7 @@ struct DisplayCommand : public Command { struct ConvertCommand : public Command { string Name() const { return "convert"; } bool RequiresTwoOperands() const { return false; } - void Apply(const Array2D& in, const Array2D¬_used, Array2D* x) { + void Apply(const Array2D& in, const Array2D&, Array2D* x) { *x = in; } }; @@ -96,7 +95,7 @@ struct ConvertCommand : public Command { struct InvertCommand : public Command { string Name() const { return "invert"; } bool RequiresTwoOperands() const { return false; } - void Apply(const Array2D& in, const Array2D¬_used, Array2D* x) { + void Apply(const Array2D& in, const Array2D&, Array2D* x) { Array2D& res = *x; res.resize(in.height(), in.width()); for (int i = 0; i < in.height(); ++i) @@ -275,8 +274,8 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) { } string cstr = os.str(); opts.add_options() - ("input_1,i", po::value(), "[REQ] Alignment 1 file, - for STDIN") - ("input_2,j", po::value(), "[OPT] Alignment 2 file, - for STDIN") + ("input_1,i", po::value(), "[REQUIRED] Alignment 1 file, - for STDIN") + ("input_2,j", po::value(), "Alignment 2 file, - for STDIN") ("command,c", po::value()->default_value("convert"), cstr.c_str()) ("help,h", "Print this help message and exit"); po::options_description clo("Command line options"); -- cgit v1.2.3 From 50105660d8c18889e8908cf3e4c583b551dc05af Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Fri, 10 Feb 2012 13:18:59 -0500 Subject: move atools to utils directory --- training/Makefile.am | 4 - training/atools.cc | 369 --------------------------------------------------- utils/Makefile.am | 4 +- utils/atools.cc | 369 +++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 372 insertions(+), 374 deletions(-) delete mode 100644 training/atools.cc create mode 100644 utils/atools.cc (limited to 'training') diff --git a/training/Makefile.am b/training/Makefile.am index 2a11ae52..d2f1ccc5 100644 --- a/training/Makefile.am +++ b/training/Makefile.am @@ -6,7 +6,6 @@ bin_PROGRAMS = \ mr_reduce_to_weights \ mr_optimize_reduce \ grammar_convert \ - atools \ plftools \ collapse_weights \ mpi_extract_reachable \ @@ -47,9 +46,6 @@ augment_grammar_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/lib test_ngram_SOURCES = test_ngram.cc test_ngram_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz -atools_SOURCES = atools.cc -atools_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz - model1_SOURCES = model1.cc ttables.cc model1_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz diff --git a/training/atools.cc b/training/atools.cc deleted file mode 100644 index 82e30c38..00000000 --- a/training/atools.cc +++ /dev/null @@ -1,369 +0,0 @@ -#include -#include -#include - -#include -#include 
-#include -#include - -#include "filelib.h" -#include "alignment_pharaoh.h" - -namespace po = boost::program_options; -using namespace std; -using boost::shared_ptr; - -struct Command { - virtual ~Command() {} - virtual string Name() const = 0; - - // returns 1 for alignment grid output [default] - // returns 2 if Summary() should be called [for AER, etc] - virtual int Result() const { return 1; } - - virtual bool RequiresTwoOperands() const { return true; } - virtual void Apply(const Array2D& a, const Array2D& b, Array2D* x) = 0; - void EnsureSize(const Array2D& a, const Array2D& b, Array2D* x) { - x->resize(max(a.width(), b.width()), max(a.height(), b.height())); - } - static bool Safe(const Array2D& a, int i, int j) { - if (i >= 0 && j >= 0 && i < a.width() && j < a.height()) - return a(i,j); - else - return false; - } - virtual void Summary() { assert(!"Summary should have been overridden"); } -}; - -// compute fmeasure, second alignment is reference, first is hyp -struct FMeasureCommand : public Command { - FMeasureCommand() : matches(), num_predicted(), num_in_ref() {} - int Result() const { return 2; } - string Name() const { return "fmeasure"; } - bool RequiresTwoOperands() const { return true; } - void Apply(const Array2D& hyp, const Array2D& ref, Array2D* x) { - (void) x; // AER just computes statistics, not an alignment - int i_len = ref.width(); - int j_len = ref.height(); - for (int i = 0; i < i_len; ++i) { - for (int j = 0; j < j_len; ++j) { - if (ref(i,j)) { - ++num_in_ref; - if (Safe(hyp, i, j)) ++matches; - } - } - } - for (int i = 0; i < hyp.width(); ++i) - for (int j = 0; j < hyp.height(); ++j) - if (hyp(i,j)) ++num_predicted; - } - void Summary() { - if (num_predicted == 0 || num_in_ref == 0) { - cerr << "Insufficient statistics to compute f-measure!\n"; - abort(); - } - const double prec = static_cast(matches) / num_predicted; - const double rec = static_cast(matches) / num_in_ref; - cout << "P: " << prec << endl; - cout << "R: " << rec << endl; - const double f = (2.0 * prec * rec) / (rec + prec); - cout << "F: " << f << endl; - } - int matches; - int num_predicted; - int num_in_ref; -}; - -struct DisplayCommand : public Command { - string Name() const { return "display"; } - bool RequiresTwoOperands() const { return false; } - void Apply(const Array2D& in, const Array2D&, Array2D* x) { - *x = in; - cout << *x << endl; - } -}; - -struct ConvertCommand : public Command { - string Name() const { return "convert"; } - bool RequiresTwoOperands() const { return false; } - void Apply(const Array2D& in, const Array2D&, Array2D* x) { - *x = in; - } -}; - -struct InvertCommand : public Command { - string Name() const { return "invert"; } - bool RequiresTwoOperands() const { return false; } - void Apply(const Array2D& in, const Array2D&, Array2D* x) { - Array2D& res = *x; - res.resize(in.height(), in.width()); - for (int i = 0; i < in.height(); ++i) - for (int j = 0; j < in.width(); ++j) - res(i, j) = in(j, i); - } -}; - -struct IntersectCommand : public Command { - string Name() const { return "intersect"; } - bool RequiresTwoOperands() const { return true; } - void Apply(const Array2D& a, const Array2D& b, Array2D* x) { - EnsureSize(a, b, x); - Array2D& res = *x; - for (int i = 0; i < a.width(); ++i) - for (int j = 0; j < a.height(); ++j) - res(i, j) = Safe(a, i, j) && Safe(b, i, j); - } -}; - -struct UnionCommand : public Command { - string Name() const { return "union"; } - bool RequiresTwoOperands() const { return true; } - void Apply(const Array2D& a, const Array2D& b, 
Array2D* x) { - EnsureSize(a, b, x); - Array2D& res = *x; - for (int i = 0; i < res.width(); ++i) - for (int j = 0; j < res.height(); ++j) - res(i, j) = Safe(a, i, j) || Safe(b, i, j); - } -}; - -struct RefineCommand : public Command { - RefineCommand() { - neighbors_.push_back(make_pair(1,0)); - neighbors_.push_back(make_pair(-1,0)); - neighbors_.push_back(make_pair(0,1)); - neighbors_.push_back(make_pair(0,-1)); - } - bool RequiresTwoOperands() const { return true; } - - void Align(int i, int j) { - res_(i, j) = true; - is_i_aligned_[i] = true; - is_j_aligned_[j] = true; - } - - bool IsNeighborAligned(int i, int j) const { - for (int k = 0; k < neighbors_.size(); ++k) { - const int di = neighbors_[k].first; - const int dj = neighbors_[k].second; - if (Safe(res_, i + di, j + dj)) - return true; - } - return false; - } - - bool IsNeitherAligned(int i, int j) const { - return !(is_i_aligned_[i] || is_j_aligned_[j]); - } - - bool IsOneOrBothUnaligned(int i, int j) const { - return !(is_i_aligned_[i] && is_j_aligned_[j]); - } - - bool KoehnAligned(int i, int j) const { - return IsOneOrBothUnaligned(i, j) && IsNeighborAligned(i, j); - } - - typedef bool (RefineCommand::*Predicate)(int i, int j) const; - - protected: - void InitRefine( - const Array2D& a, - const Array2D& b) { - res_.clear(); - EnsureSize(a, b, &res_); - in_.clear(); un_.clear(); is_i_aligned_.clear(); is_j_aligned_.clear(); - EnsureSize(a, b, &in_); - EnsureSize(a, b, &un_); - is_i_aligned_.resize(res_.width(), false); - is_j_aligned_.resize(res_.height(), false); - for (int i = 0; i < in_.width(); ++i) - for (int j = 0; j < in_.height(); ++j) { - un_(i, j) = Safe(a, i, j) || Safe(b, i, j); - in_(i, j) = Safe(a, i, j) && Safe(b, i, j); - if (in_(i, j)) Align(i, j); - } - } - // "grow" the resulting alignment using the points in adds - // if they match the constraints determined by pred - void Grow(Predicate pred, bool idempotent, const Array2D& adds) { - if (idempotent) { - for (int i = 0; i < adds.width(); ++i) - for (int j = 0; j < adds.height(); ++j) { - if (adds(i, j) && !res_(i, j) && - (this->*pred)(i, j)) Align(i, j); - } - return; - } - set > p; - for (int i = 0; i < adds.width(); ++i) - for (int j = 0; j < adds.height(); ++j) - if (adds(i, j) && !res_(i, j)) - p.insert(make_pair(i, j)); - bool keep_going = !p.empty(); - while (keep_going) { - keep_going = false; - for (set >::iterator pi = p.begin(); - pi != p.end(); ++pi) { - if ((this->*pred)(pi->first, pi->second)) { - Align(pi->first, pi->second); - p.erase(pi); - keep_going = true; - } - } - } - } - Array2D res_; // refined alignment - Array2D in_; // intersection alignment - Array2D un_; // union alignment - vector is_i_aligned_; - vector is_j_aligned_; - vector > neighbors_; -}; - -struct DiagCommand : public RefineCommand { - DiagCommand() { - neighbors_.push_back(make_pair(1,1)); - neighbors_.push_back(make_pair(-1,1)); - neighbors_.push_back(make_pair(1,-1)); - neighbors_.push_back(make_pair(-1,-1)); - } -}; - -struct GDCommand : public DiagCommand { - string Name() const { return "grow-diag"; } - void Apply(const Array2D& a, const Array2D& b, Array2D* x) { - InitRefine(a, b); - Grow(&RefineCommand::KoehnAligned, false, un_); - *x = res_; - } -}; - -struct GDFCommand : public DiagCommand { - string Name() const { return "grow-diag-final"; } - void Apply(const Array2D& a, const Array2D& b, Array2D* x) { - InitRefine(a, b); - Grow(&RefineCommand::KoehnAligned, false, un_); - Grow(&RefineCommand::IsOneOrBothUnaligned, true, a); - 
Grow(&RefineCommand::IsOneOrBothUnaligned, true, b); - *x = res_; - } -}; - -struct GDFACommand : public DiagCommand { - string Name() const { return "grow-diag-final-and"; } - void Apply(const Array2D& a, const Array2D& b, Array2D* x) { - InitRefine(a, b); - Grow(&RefineCommand::KoehnAligned, false, un_); - Grow(&RefineCommand::IsNeitherAligned, true, a); - Grow(&RefineCommand::IsNeitherAligned, true, b); - *x = res_; - } -}; - -map > commands; - -void InitCommandLine(int argc, char** argv, po::variables_map* conf) { - po::options_description opts("Configuration options"); - ostringstream os; - os << "[REQ] Operation to perform:"; - for (map >::iterator it = commands.begin(); - it != commands.end(); ++it) { - os << ' ' << it->first; - } - string cstr = os.str(); - opts.add_options() - ("input_1,i", po::value(), "[REQUIRED] Alignment 1 file, - for STDIN") - ("input_2,j", po::value(), "Alignment 2 file, - for STDIN") - ("command,c", po::value()->default_value("convert"), cstr.c_str()) - ("help,h", "Print this help message and exit"); - po::options_description clo("Command line options"); - po::options_description dcmdline_options; - dcmdline_options.add(opts); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - po::notify(*conf); - - if (conf->count("help") || conf->count("input_1") == 0 || conf->count("command") == 0) { - cerr << dcmdline_options << endl; - exit(1); - } - const string cmd = (*conf)["command"].as(); - if (commands.count(cmd) == 0) { - cerr << "Don't understand command: " << cmd << endl; - exit(1); - } - if (commands[cmd]->RequiresTwoOperands()) { - if (conf->count("input_2") == 0) { - cerr << "Command '" << cmd << "' requires two alignment files\n"; - exit(1); - } - if ((*conf)["input_1"].as() == "-" && (*conf)["input_2"].as() == "-") { - cerr << "Both inputs cannot be STDIN\n"; - exit(1); - } - } else { - if (conf->count("input_2") != 0) { - cerr << "Command '" << cmd << "' requires only one alignment file\n"; - exit(1); - } - } -} - -template static void AddCommand() { - C* c = new C; - commands[c->Name()].reset(c); -} - -int main(int argc, char **argv) { - AddCommand(); - AddCommand(); - AddCommand(); - AddCommand(); - AddCommand(); - AddCommand(); - AddCommand(); - AddCommand(); - AddCommand(); - po::variables_map conf; - InitCommandLine(argc, argv, &conf); - Command& cmd = *commands[conf["command"].as()]; - boost::shared_ptr rf1(new ReadFile(conf["input_1"].as())); - boost::shared_ptr rf2; - if (cmd.RequiresTwoOperands()) - rf2.reset(new ReadFile(conf["input_2"].as())); - istream* in1 = rf1->stream(); - istream* in2 = NULL; - if (rf2) in2 = rf2->stream(); - while(*in1) { - string line1; - string line2; - getline(*in1, line1); - if (in2) { - getline(*in2, line2); - if ((*in1 && !*in2) || (*in2 && !*in1)) { - cerr << "Mismatched number of lines!\n"; - exit(1); - } - } - if (line1.empty() && !*in1) break; - shared_ptr > out(new Array2D); - shared_ptr > a1 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line1); - if (in2) { - shared_ptr > a2 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line2); - cmd.Apply(*a1, *a2, out.get()); - } else { - Array2D dummy; - cmd.Apply(*a1, dummy, out.get()); - } - - if (cmd.Result() == 1) { - AlignmentPharaoh::SerializePharaohFormat(*out, &cout); - } - } - if (cmd.Result() == 2) - cmd.Summary(); - return 0; -} - diff --git a/utils/Makefile.am b/utils/Makefile.am index a1ea8270..6e0678de 100644 --- a/utils/Makefile.am +++ b/utils/Makefile.am @@ -1,5 +1,5 @@ -bin_PROGRAMS = reconstruct_weights +bin_PROGRAMS = 
reconstruct_weights atools noinst_PROGRAMS = ts phmt mfcr_test TESTS = ts phmt mfcr_test @@ -17,6 +17,8 @@ endif reconstruct_weights_SOURCES = reconstruct_weights.cc +atools_SOURCES = atools.cc + noinst_LIBRARIES = libutils.a libutils_a_SOURCES = \ diff --git a/utils/atools.cc b/utils/atools.cc new file mode 100644 index 00000000..c0a91731 --- /dev/null +++ b/utils/atools.cc @@ -0,0 +1,369 @@ +#include +#include +#include + +#include +#include +#include +#include + +#include "filelib.h" +#include "alignment_pharaoh.h" + +namespace po = boost::program_options; +using namespace std; +using boost::shared_ptr; + +struct Command { + virtual ~Command() {} + virtual string Name() const = 0; + + // returns 1 for alignment grid output [default] + // returns 2 if Summary() should be called [for AER, etc] + virtual int Result() const { return 1; } + + virtual bool RequiresTwoOperands() const { return true; } + virtual void Apply(const Array2D& a, const Array2D& b, Array2D* x) = 0; + void EnsureSize(const Array2D& a, const Array2D& b, Array2D* x) { + x->resize(max(a.width(), b.width()), max(a.height(), b.height())); + } + static bool Safe(const Array2D& a, int i, int j) { + if (i >= 0 && j >= 0 && i < a.width() && j < a.height()) + return a(i,j); + else + return false; + } + virtual void Summary() { assert(!"Summary should have been overridden"); } +}; + +// compute fmeasure, second alignment is reference, first is hyp +struct FMeasureCommand : public Command { + FMeasureCommand() : matches(), num_predicted(), num_in_ref() {} + int Result() const { return 2; } + string Name() const { return "fmeasure"; } + bool RequiresTwoOperands() const { return true; } + void Apply(const Array2D& hyp, const Array2D& ref, Array2D* x) { + (void) x; // AER just computes statistics, not an alignment + int i_len = ref.width(); + int j_len = ref.height(); + for (int i = 0; i < i_len; ++i) { + for (int j = 0; j < j_len; ++j) { + if (ref(i,j)) { + ++num_in_ref; + if (Safe(hyp, i, j)) ++matches; + } + } + } + for (int i = 0; i < hyp.width(); ++i) + for (int j = 0; j < hyp.height(); ++j) + if (hyp(i,j)) ++num_predicted; + } + void Summary() { + if (num_predicted == 0 || num_in_ref == 0) { + cerr << "Insufficient statistics to compute f-measure!\n"; + abort(); + } + const double prec = static_cast(matches) / num_predicted; + const double rec = static_cast(matches) / num_in_ref; + cout << "P: " << prec << endl; + cout << "R: " << rec << endl; + const double f = (2.0 * prec * rec) / (rec + prec); + cout << "F: " << f << endl; + } + int matches; + int num_predicted; + int num_in_ref; +}; + +struct DisplayCommand : public Command { + string Name() const { return "display"; } + bool RequiresTwoOperands() const { return false; } + void Apply(const Array2D& in, const Array2D&, Array2D* x) { + *x = in; + cout << *x << endl; + } +}; + +struct ConvertCommand : public Command { + string Name() const { return "convert"; } + bool RequiresTwoOperands() const { return false; } + void Apply(const Array2D& in, const Array2D&, Array2D* x) { + *x = in; + } +}; + +struct InvertCommand : public Command { + string Name() const { return "invert"; } + bool RequiresTwoOperands() const { return false; } + void Apply(const Array2D& in, const Array2D&, Array2D* x) { + Array2D& res = *x; + res.resize(in.height(), in.width()); + for (int i = 0; i < in.height(); ++i) + for (int j = 0; j < in.width(); ++j) + res(i, j) = in(j, i); + } +}; + +struct IntersectCommand : public Command { + string Name() const { return "intersect"; } + bool RequiresTwoOperands() 
const { return true; } + void Apply(const Array2D& a, const Array2D& b, Array2D* x) { + EnsureSize(a, b, x); + Array2D& res = *x; + for (int i = 0; i < a.width(); ++i) + for (int j = 0; j < a.height(); ++j) + res(i, j) = Safe(a, i, j) && Safe(b, i, j); + } +}; + +struct UnionCommand : public Command { + string Name() const { return "union"; } + bool RequiresTwoOperands() const { return true; } + void Apply(const Array2D& a, const Array2D& b, Array2D* x) { + EnsureSize(a, b, x); + Array2D& res = *x; + for (int i = 0; i < res.width(); ++i) + for (int j = 0; j < res.height(); ++j) + res(i, j) = Safe(a, i, j) || Safe(b, i, j); + } +}; + +struct RefineCommand : public Command { + RefineCommand() { + neighbors_.push_back(make_pair(1,0)); + neighbors_.push_back(make_pair(-1,0)); + neighbors_.push_back(make_pair(0,1)); + neighbors_.push_back(make_pair(0,-1)); + } + bool RequiresTwoOperands() const { return true; } + + void Align(int i, int j) { + res_(i, j) = true; + is_i_aligned_[i] = true; + is_j_aligned_[j] = true; + } + + bool IsNeighborAligned(int i, int j) const { + for (int k = 0; k < neighbors_.size(); ++k) { + const int di = neighbors_[k].first; + const int dj = neighbors_[k].second; + if (Safe(res_, i + di, j + dj)) + return true; + } + return false; + } + + bool IsNeitherAligned(int i, int j) const { + return !(is_i_aligned_[i] || is_j_aligned_[j]); + } + + bool IsOneOrBothUnaligned(int i, int j) const { + return !(is_i_aligned_[i] && is_j_aligned_[j]); + } + + bool KoehnAligned(int i, int j) const { + return IsOneOrBothUnaligned(i, j) && IsNeighborAligned(i, j); + } + + typedef bool (RefineCommand::*Predicate)(int i, int j) const; + + protected: + void InitRefine( + const Array2D& a, + const Array2D& b) { + res_.clear(); + EnsureSize(a, b, &res_); + in_.clear(); un_.clear(); is_i_aligned_.clear(); is_j_aligned_.clear(); + EnsureSize(a, b, &in_); + EnsureSize(a, b, &un_); + is_i_aligned_.resize(res_.width(), false); + is_j_aligned_.resize(res_.height(), false); + for (int i = 0; i < in_.width(); ++i) + for (int j = 0; j < in_.height(); ++j) { + un_(i, j) = Safe(a, i, j) || Safe(b, i, j); + in_(i, j) = Safe(a, i, j) && Safe(b, i, j); + if (in_(i, j)) Align(i, j); + } + } + // "grow" the resulting alignment using the points in adds + // if they match the constraints determined by pred + void Grow(Predicate pred, bool idempotent, const Array2D& adds) { + if (idempotent) { + for (int i = 0; i < adds.width(); ++i) + for (int j = 0; j < adds.height(); ++j) { + if (adds(i, j) && !res_(i, j) && + (this->*pred)(i, j)) Align(i, j); + } + return; + } + set > p; + for (int i = 0; i < adds.width(); ++i) + for (int j = 0; j < adds.height(); ++j) + if (adds(i, j) && !res_(i, j)) + p.insert(make_pair(i, j)); + bool keep_going = !p.empty(); + while (keep_going) { + keep_going = false; + for (set >::iterator pi = p.begin(); + pi != p.end(); ++pi) { + if ((this->*pred)(pi->first, pi->second)) { + Align(pi->first, pi->second); + p.erase(pi); + keep_going = true; + } + } + } + } + Array2D res_; // refined alignment + Array2D in_; // intersection alignment + Array2D un_; // union alignment + vector is_i_aligned_; + vector is_j_aligned_; + vector > neighbors_; +}; + +struct DiagCommand : public RefineCommand { + DiagCommand() { + neighbors_.push_back(make_pair(1,1)); + neighbors_.push_back(make_pair(-1,1)); + neighbors_.push_back(make_pair(1,-1)); + neighbors_.push_back(make_pair(-1,-1)); + } +}; + +struct GDCommand : public DiagCommand { + string Name() const { return "grow-diag"; } + void Apply(const 
Array2D& a, const Array2D& b, Array2D* x) { + InitRefine(a, b); + Grow(&RefineCommand::KoehnAligned, false, un_); + *x = res_; + } +}; + +struct GDFCommand : public DiagCommand { + string Name() const { return "grow-diag-final"; } + void Apply(const Array2D& a, const Array2D& b, Array2D* x) { + InitRefine(a, b); + Grow(&RefineCommand::KoehnAligned, false, un_); + Grow(&RefineCommand::IsOneOrBothUnaligned, true, a); + Grow(&RefineCommand::IsOneOrBothUnaligned, true, b); + *x = res_; + } +}; + +struct GDFACommand : public DiagCommand { + string Name() const { return "grow-diag-final-and"; } + void Apply(const Array2D& a, const Array2D& b, Array2D* x) { + InitRefine(a, b); + Grow(&RefineCommand::KoehnAligned, false, un_); + Grow(&RefineCommand::IsNeitherAligned, true, a); + Grow(&RefineCommand::IsNeitherAligned, true, b); + *x = res_; + } +}; + +map > commands; + +void InitCommandLine(int argc, char** argv, po::variables_map* conf) { + po::options_description opts("Configuration options"); + ostringstream os; + os << "Operation to perform:"; + for (map >::iterator it = commands.begin(); + it != commands.end(); ++it) { + os << ' ' << it->first; + } + string cstr = os.str(); + opts.add_options() + ("input_1,i", po::value(), "[REQUIRED] Alignment 1 file, - for STDIN") + ("input_2,j", po::value(), "Alignment 2 file, - for STDIN") + ("command,c", po::value()->default_value("convert"), cstr.c_str()) + ("help,h", "Print this help message and exit"); + po::options_description clo("Command line options"); + po::options_description dcmdline_options; + dcmdline_options.add(opts); + + po::store(parse_command_line(argc, argv, dcmdline_options), *conf); + po::notify(*conf); + + if (conf->count("help") || conf->count("input_1") == 0 || conf->count("command") == 0) { + cerr << dcmdline_options << endl; + exit(1); + } + const string cmd = (*conf)["command"].as(); + if (commands.count(cmd) == 0) { + cerr << "Don't understand command: " << cmd << endl; + exit(1); + } + if (commands[cmd]->RequiresTwoOperands()) { + if (conf->count("input_2") == 0) { + cerr << "Command '" << cmd << "' requires two alignment files\n"; + exit(1); + } + if ((*conf)["input_1"].as() == "-" && (*conf)["input_2"].as() == "-") { + cerr << "Both inputs cannot be STDIN\n"; + exit(1); + } + } else { + if (conf->count("input_2") != 0) { + cerr << "Command '" << cmd << "' requires only one alignment file\n"; + exit(1); + } + } +} + +template static void AddCommand() { + C* c = new C; + commands[c->Name()].reset(c); +} + +int main(int argc, char **argv) { + AddCommand(); + AddCommand(); + AddCommand(); + AddCommand(); + AddCommand(); + AddCommand(); + AddCommand(); + AddCommand(); + AddCommand(); + po::variables_map conf; + InitCommandLine(argc, argv, &conf); + Command& cmd = *commands[conf["command"].as()]; + boost::shared_ptr rf1(new ReadFile(conf["input_1"].as())); + boost::shared_ptr rf2; + if (cmd.RequiresTwoOperands()) + rf2.reset(new ReadFile(conf["input_2"].as())); + istream* in1 = rf1->stream(); + istream* in2 = NULL; + if (rf2) in2 = rf2->stream(); + while(*in1) { + string line1; + string line2; + getline(*in1, line1); + if (in2) { + getline(*in2, line2); + if ((*in1 && !*in2) || (*in2 && !*in1)) { + cerr << "Mismatched number of lines!\n"; + exit(1); + } + } + if (line1.empty() && !*in1) break; + shared_ptr > out(new Array2D); + shared_ptr > a1 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line1); + if (in2) { + shared_ptr > a2 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line2); + cmd.Apply(*a1, *a2, out.get()); + } else { + Array2D 
dummy; + cmd.Apply(*a1, dummy, out.get()); + } + + if (cmd.Result() == 1) { + AlignmentPharaoh::SerializePharaohFormat(*out, &cout); + } + } + if (cmd.Result() == 2) + cmd.Summary(); + return 0; +} + -- cgit v1.2.3 From a38b3fa383412e56eb958db998662c026bc08f4b Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Fri, 17 Feb 2012 13:01:54 -0500 Subject: boost version checking, check for Eigen, get rid of old digamma stuff --- configure.ac | 21 +++++++++++++++------ training/em_utils.h | 24 ------------------------ training/model1.cc | 1 - training/mr_em_adapted_reduce.cc | 6 +++--- training/ttables.h | 4 ++-- utils/m.h | 6 ++++++ 6 files changed, 26 insertions(+), 36 deletions(-) delete mode 100644 training/em_utils.h (limited to 'training') diff --git a/configure.ac b/configure.ac index cd78ee72..aa79027f 100644 --- a/configure.ac +++ b/configure.ac @@ -9,7 +9,7 @@ esac AC_PROG_CC AC_PROG_CXX AC_LANG_CPLUSPLUS -BOOST_REQUIRE +BOOST_REQUIRE([1.44]) BOOST_PROGRAM_OPTIONS AC_ARG_ENABLE(mpi, [ --enable-mpi Build MPI binaries, assumes mpi.h is present ], @@ -38,7 +38,7 @@ then CPPFLAGS="$CPPFLAGS -I${with_cmph}/include" AC_CHECK_HEADER(cmph.h, - [AC_DEFINE([HAVE_CMPH], [], [flag for cmph perfect hashing library])], + [AC_DEFINE([HAVE_CMPH], [1], [flag for cmph perfect hashing library])], [AC_MSG_ERROR([Cannot find cmph library!])]) LDFLAGS="$LDFLAGS -L${with_cmph}/lib" @@ -46,6 +46,18 @@ then AM_CONDITIONAL([HAVE_CMPH], true) fi +if test "x$with_eigen" != 'xno' +then + SAVE_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS -I${with_eigen}" + + AC_CHECK_HEADER(Eigen, + [AC_DEFINE([HAVE_EIGEN], [1], [flag for Eigen linear algebra library])], + [AC_MSG_ERROR([Cannot find Eigen!])]) + + AM_CONDITIONAL([HAVE_EIGEN], true) +fi + #BOOST_THREADS CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" LDFLAGS="$LDFLAGS $BOOST_PROGRAM_OPTIONS_LDFLAGS" @@ -53,11 +65,8 @@ LDFLAGS="$LDFLAGS $BOOST_PROGRAM_OPTIONS_LDFLAGS" LIBS="$LIBS $BOOST_PROGRAM_OPTIONS_LIBS" # $BOOST_THREAD_LIBS" -AC_CHECK_HEADER(boost/math/special_functions/digamma.hpp, - [AC_DEFINE([HAVE_BOOST_DIGAMMA], [], [flag for boost::math::digamma])]) - AC_CHECK_HEADER(google/dense_hash_map, - [AC_DEFINE([HAVE_SPARSEHASH], [], [flag for google::dense_hash_map])]) + [AC_DEFINE([HAVE_SPARSEHASH], [1], [flag for google::dense_hash_map])]) AC_PROG_INSTALL GTEST_LIB_CHECK(1.0) diff --git a/training/em_utils.h b/training/em_utils.h deleted file mode 100644 index 37762978..00000000 --- a/training/em_utils.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef _EM_UTILS_H_ -#define _EM_UTILS_H_ - -#include "config.h" -#ifdef HAVE_BOOST_DIGAMMA -#include -using boost::math::digamma; -#else -#warning Using Mark Johnsons digamma() -#include -inline double digamma(double x) { - double result = 0, xx, xx2, xx4; - assert(x > 0); - for ( ; x < 7; ++x) - result -= 1/x; - x -= 1.0/2.0; - xx = 1.0/x; - xx2 = xx*xx; - xx4 = xx2*xx2; - result += log(x)+(1./24.)*xx2-(7.0/960.0)*xx4+(31.0/8064.0)*xx4*xx2-(127.0/30720.0)*xx4*xx4; - return result; -} -#endif -#endif diff --git a/training/model1.cc b/training/model1.cc index 40249aa3..a87d388f 100644 --- a/training/model1.cc +++ b/training/model1.cc @@ -9,7 +9,6 @@ #include "filelib.h" #include "ttables.h" #include "tdict.h" -#include "em_utils.h" namespace po = boost::program_options; using namespace std; diff --git a/training/mr_em_adapted_reduce.cc b/training/mr_em_adapted_reduce.cc index d4c16a2f..f65b5440 100644 --- a/training/mr_em_adapted_reduce.cc +++ b/training/mr_em_adapted_reduce.cc @@ -10,7 +10,7 @@ #include "fdict.h" #include "weights.h" #include 
"sparse_vector.h" -#include "em_utils.h" +#include "m.h" using namespace std; namespace po = boost::program_options; @@ -63,11 +63,11 @@ void Maximize(const bool use_vb, assert(tot > 0.0); double ltot = log(tot); if (use_vb) - ltot = digamma(tot + total_event_types * alpha); + ltot = Md::digamma(tot + total_event_types * alpha); for (SparseVector::const_iterator it = counts.begin(); it != counts.end(); ++it) { if (use_vb) { - pc->set_value(it->first, NoZero(digamma(it->second + alpha) - ltot)); + pc->set_value(it->first, NoZero(Md::digamma(it->second + alpha) - ltot)); } else { pc->set_value(it->first, NoZero(log(it->second) - ltot)); } diff --git a/training/ttables.h b/training/ttables.h index 50d85a68..bf3351d2 100644 --- a/training/ttables.h +++ b/training/ttables.h @@ -4,9 +4,9 @@ #include #include +#include "m.h" #include "wordid.h" #include "tdict.h" -#include "em_utils.h" class TTable { public: @@ -39,7 +39,7 @@ class TTable { for (Word2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) tot += it->second + alpha; for (Word2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) - it->second = exp(digamma(it->second + alpha) - digamma(tot)); + it->second = exp(Md::digamma(it->second + alpha) - Md::digamma(tot)); } counts.clear(); } diff --git a/utils/m.h b/utils/m.h index b25248c2..5e45efee 100644 --- a/utils/m.h +++ b/utils/m.h @@ -3,6 +3,7 @@ #include #include +#include template struct M { @@ -81,6 +82,11 @@ struct M { } } + // digamma is the first derivative of the log-gamma function + static inline F digamma(const F& x) { + return boost::math::digamma(x); + } + }; typedef M Md; -- cgit v1.2.3 From d3ccf26cf501cb15ed300bc0ad17596a4e59fbeb Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Sat, 18 Feb 2012 15:16:17 -0500 Subject: fix diagonal model --- configure.ac | 2 +- training/model1.cc | 29 +++++++++++++++++------------ 2 files changed, 18 insertions(+), 13 deletions(-) (limited to 'training') diff --git a/configure.ac b/configure.ac index aa79027f..026dad01 100644 --- a/configure.ac +++ b/configure.ac @@ -51,7 +51,7 @@ then SAVE_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I${with_eigen}" - AC_CHECK_HEADER(Eigen, + AC_CHECK_HEADER(Eigen/Dense, [AC_DEFINE([HAVE_EIGEN], [1], [flag for Eigen linear algebra library])], [AC_MSG_ERROR([Cannot find Eigen!])]) diff --git a/training/model1.cc b/training/model1.cc index a87d388f..73104304 100644 --- a/training/model1.cc +++ b/training/model1.cc @@ -4,6 +4,7 @@ #include #include +#include "m.h" #include "lattice.h" #include "stringlib.h" #include "filelib.h" @@ -13,11 +14,6 @@ namespace po = boost::program_options; using namespace std; -inline double log_poisson(unsigned x, const double& lambda) { - assert(lambda > 0.0); - return log(lambda) * x - lgamma(x + 1) - lambda; -} - bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { po::options_description opts("Configuration options"); opts.add_options() @@ -82,6 +78,7 @@ int main(int argc, char** argv) { TTable::Word2Word2Double was_viterbi; double tot_len_ratio = 0; double mean_srclen_multiplier = 0; + vector unnormed_a_i; for (int iter = 0; iter < ITERATIONS; ++iter) { const bool final_iteration = (iter == (ITERATIONS - 1)); cerr << "ITERATION " << (iter + 1) << (final_iteration ? 
" (FINAL)" : "") << endl; @@ -108,6 +105,8 @@ int main(int argc, char** argv) { assert(src.size() > 0); assert(trg.size() > 0); } + if (src.size() > unnormed_a_i.size()) + unnormed_a_i.resize(src.size()); if (iter == 0) tot_len_ratio += static_cast(trg.size()) / static_cast(src.size()); denom += trg.size(); @@ -125,13 +124,15 @@ int main(int argc, char** argv) { } double az = 0; if (favor_diagonal) { - for (int ta = 0; ta < src.size(); ++ta) - az += exp(-fabs(double(ta) / src.size() - j_over_ts) * diagonal_tension); + for (int ta = 0; ta < src.size(); ++ta) { + unnormed_a_i[ta] = exp(-fabs(double(ta) / src.size() - j_over_ts) * diagonal_tension); + az += unnormed_a_i[ta]; + } az /= prob_align_not_null; } for (int i = 1; i <= src.size(); ++i) { if (favor_diagonal) - prob_a_i = exp(-fabs(double(i) / src.size() - j_over_ts) * diagonal_tension) / az; + prob_a_i = unnormed_a_i[i-1] / az; probs[i] = tt.prob(src[i-1][0].label, f_j) * prob_a_i; sum += probs[i]; } @@ -202,7 +203,9 @@ int main(int argc, char** argv) { Lattice src, trg; LatticeTools::ConvertTextToLattice(ssrc, &src); LatticeTools::ConvertTextToLattice(strg, &trg); - double log_prob = log_poisson(trg.size(), 0.05 + src.size() * mean_srclen_multiplier); + double log_prob = Md::log_poisson(trg.size(), 0.05 + src.size() * mean_srclen_multiplier); + if (src.size() > unnormed_a_i.size()) + unnormed_a_i.resize(src.size()); // compute likelihood for (int j = 0; j < trg.size(); ++j) { @@ -216,13 +219,15 @@ int main(int argc, char** argv) { } double az = 0; if (favor_diagonal) { - for (int ta = 0; ta < src.size(); ++ta) - az += exp(-fabs(double(ta) / src.size() - j_over_ts) * diagonal_tension); + for (int ta = 0; ta < src.size(); ++ta) { + unnormed_a_i[ta] = exp(-fabs(double(ta) / src.size() - j_over_ts) * diagonal_tension); + az += unnormed_a_i[ta]; + } az /= prob_align_not_null; } for (int i = 1; i <= src.size(); ++i) { if (favor_diagonal) - prob_a_i = exp(-fabs(double(i) / src.size() - j_over_ts) * diagonal_tension) / az; + prob_a_i = unnormed_a_i[i-1] / az; sum += tt.prob(src[i-1][0].label, f_j) * prob_a_i; } log_prob += log(sum); -- cgit v1.2.3 From 2903e0a0daf941b20da812149f647cd4e0f4dd66 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Sat, 18 Feb 2012 22:09:47 -0500 Subject: initial lbl_model stub --- training/Makefile.am | 4 ++ training/lbl_model.cc | 131 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 135 insertions(+) create mode 100644 training/lbl_model.cc (limited to 'training') diff --git a/training/Makefile.am b/training/Makefile.am index d2f1ccc5..330341ac 100644 --- a/training/Makefile.am +++ b/training/Makefile.am @@ -1,5 +1,6 @@ bin_PROGRAMS = \ model1 \ + lbl_model \ test_ngram \ mr_em_map_adapter \ mr_em_adapted_reduce \ @@ -49,6 +50,9 @@ test_ngram_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteva model1_SOURCES = model1.cc ttables.cc model1_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz +lbl_model_SOURCES = lbl_model.cc ttables.cc +lbl_model_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz + grammar_convert_SOURCES = grammar_convert.cc grammar_convert_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz diff --git a/training/lbl_model.cc b/training/lbl_model.cc new file mode 100644 index 00000000..72d80a56 --- /dev/null +++ b/training/lbl_model.cc @@ -0,0 +1,131 @@ +#include + +#include "config.h" +#ifndef HAVE_EIGEN + int main() { std::cerr << "Please rebuild with --with-eigen PATH\n"; return 1; } +#else + 
+#include + +#include +#include +#include + +#include "m.h" +#include "lattice.h" +#include "stringlib.h" +#include "filelib.h" +#include "tdict.h" + +namespace po = boost::program_options; +using namespace std; + +bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { + po::options_description opts("Configuration options"); + opts.add_options() + ("iterations,i",po::value()->default_value(5),"Number of iterations of training") + ("diagonal_tension,T", po::value()->default_value(4.0), "How sharp or flat around the diagonal is the alignment distribution (0 = uniform, >0 sharpens)") + ("testset,x", po::value(), "After training completes, compute the log likelihood of this set of sentence pairs under the learned model"); + po::options_description clo("Command line options"); + clo.add_options() + ("config", po::value(), "Configuration file") + ("help,h", "Print this help message and exit"); + po::options_description dconfig_options, dcmdline_options; + dconfig_options.add(opts); + dcmdline_options.add(opts).add(clo); + + po::store(parse_command_line(argc, argv, dcmdline_options), *conf); + if (conf->count("config")) { + ifstream config((*conf)["config"].as().c_str()); + po::store(po::parse_config_file(config, dconfig_options), *conf); + } + po::notify(*conf); + + if (argc < 2 || conf->count("help")) { + cerr << "Usage " << argv[0] << " [OPTIONS] corpus.fr-en\n"; + cerr << dcmdline_options << endl; + return false; + } + return true; +} + +int main(int argc, char** argv) { + po::variables_map conf; + if (!InitCommandLine(argc, argv, &conf)) return 1; + const string fname = argv[argc - 1]; + const int ITERATIONS = conf["iterations"].as(); + const double diagonal_tension = conf["diagonal_tension"].as(); + string testset; + if (conf.count("testset")) testset = conf["testset"].as(); + + double tot_len_ratio = 0; + double mean_srclen_multiplier = 0; + vector unnormed_a_i; + for (int iter = 0; iter < ITERATIONS; ++iter) { + cerr << "ITERATION " << (iter + 1) << endl; + ReadFile rf(fname); + istream& in = *rf.stream(); + double likelihood = 0; + double denom = 0.0; + int lc = 0; + bool flag = false; + string line; + string ssrc, strg; + while(true) { + getline(in, line); + if (!in) break; + ++lc; + if (lc % 1000 == 0) { cerr << '.'; flag = true; } + if (lc %50000 == 0) { cerr << " [" << lc << "]\n" << flush; flag = false; } + ParseTranslatorInput(line, &ssrc, &strg); + Lattice src, trg; + LatticeTools::ConvertTextToLattice(ssrc, &src); + LatticeTools::ConvertTextToLattice(strg, &trg); + if (src.size() == 0 || trg.size() == 0) { + cerr << "Error: " << lc << "\n" << line << endl; + assert(src.size() > 0); + assert(trg.size() > 0); + } + if (src.size() > unnormed_a_i.size()) + unnormed_a_i.resize(src.size()); + if (iter == 0) + tot_len_ratio += static_cast(trg.size()) / static_cast(src.size()); + denom += trg.size(); + vector probs(src.size() + 1); + bool first_al = true; // used for write_alignments + for (int j = 0; j < trg.size(); ++j) { + const WordID& f_j = trg[j][0].label; + double sum = 0; + const double j_over_ts = double(j) / trg.size(); + double prob_a_i = 1.0 / src.size(); + double az = 0; + for (int ta = 0; ta < src.size(); ++ta) { + unnormed_a_i[ta] = exp(-fabs(double(ta) / src.size() - j_over_ts) * diagonal_tension); + az += unnormed_a_i[ta]; + } + for (int i = 1; i <= src.size(); ++i) { + prob_a_i = unnormed_a_i[i-1] / az; + probs[i] = 1; // tt.prob(src[i-1][0].label, f_j) * prob_a_i; + sum += probs[i]; + } + } + } + + // log(e) = 1.0 + double base2_likelihood = likelihood 
/ log(2); + + if (flag) { cerr << endl; } + if (iter == 0) { + mean_srclen_multiplier = tot_len_ratio / lc; + cerr << "expected target length = source length * " << mean_srclen_multiplier << endl; + } + cerr << " log_e likelihood: " << likelihood << endl; + cerr << " log_2 likelihood: " << base2_likelihood << endl; + cerr << " cross entropy: " << (-base2_likelihood / denom) << endl; + cerr << " perplexity: " << pow(2.0, -base2_likelihood / denom) << endl; + } + return 0; +} + +#endif + -- cgit v1.2.3 From c4ffa6df1fdd89e3db9c6d3829b7b84edac20bcf Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Sun, 19 Feb 2012 04:27:55 -0500 Subject: lbl preliminary clean up --- decoder/lattice.cc | 1 + training/lbl_model.cc | 84 +++++++++++++++++++++++++++++++++------------------ 2 files changed, 55 insertions(+), 30 deletions(-) (limited to 'training') diff --git a/decoder/lattice.cc b/decoder/lattice.cc index e3631e59..89da3cd0 100644 --- a/decoder/lattice.cc +++ b/decoder/lattice.cc @@ -46,6 +46,7 @@ void LatticeTools::ConvertTextToLattice(const string& text, Lattice* pl) { Lattice& l = *pl; vector ids; TD::ConvertSentence(text, &ids); + l.clear(); l.resize(ids.size()); for (int i = 0; i < l.size(); ++i) l[i].push_back(LatticeArc(ids[i], 0.0, 1)); diff --git a/training/lbl_model.cc b/training/lbl_model.cc index 72d80a56..ccd29255 100644 --- a/training/lbl_model.cc +++ b/training/lbl_model.cc @@ -6,6 +6,7 @@ #else #include +#include #include #include @@ -20,10 +21,17 @@ namespace po = boost::program_options; using namespace std; +#define kDIMENSIONS 10 +typedef Eigen::Matrix RVector; +typedef Eigen::Matrix RTVector; +typedef Eigen::Matrix TMatrix; +vector r_src, r_trg; + bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { po::options_description opts("Configuration options"); opts.add_options() - ("iterations,i",po::value()->default_value(5),"Number of iterations of training") + ("input,i",po::value(),"Input file") + ("iterations,I",po::value()->default_value(1000),"Number of iterations of training") ("diagonal_tension,T", po::value()->default_value(4.0), "How sharp or flat around the diagonal is the alignment distribution (0 = uniform, >0 sharpens)") ("testset,x", po::value(), "After training completes, compute the log likelihood of this set of sentence pairs under the learned model"); po::options_description clo("Command line options"); @@ -42,7 +50,7 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { po::notify(*conf); if (argc < 2 || conf->count("help")) { - cerr << "Usage " << argv[0] << " [OPTIONS] corpus.fr-en\n"; + cerr << "Usage " << argv[0] << " [OPTIONS] -i corpus.fr-en\n"; cerr << dcmdline_options << endl; return false; } @@ -52,33 +60,32 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { int main(int argc, char** argv) { po::variables_map conf; if (!InitCommandLine(argc, argv, &conf)) return 1; - const string fname = argv[argc - 1]; + const string fname = conf["input"].as(); const int ITERATIONS = conf["iterations"].as(); const double diagonal_tension = conf["diagonal_tension"].as(); + if (diagonal_tension < 0.0) { + cerr << "Invalid value for diagonal_tension: must be >= 0\n"; + return 1; + } string testset; if (conf.count("testset")) testset = conf["testset"].as(); - double tot_len_ratio = 0; - double mean_srclen_multiplier = 0; + int lc = 0; vector unnormed_a_i; - for (int iter = 0; iter < ITERATIONS; ++iter) { - cerr << "ITERATION " << (iter + 1) << endl; + string line; + string ssrc, strg; + bool flag = false; + Lattice 
src, trg; + set vocab_e; + { // read through corpus, initialize int map, check lines are good + cerr << "INITIAL READ OF " << fname << endl; ReadFile rf(fname); istream& in = *rf.stream(); - double likelihood = 0; - double denom = 0.0; - int lc = 0; - bool flag = false; - string line; - string ssrc, strg; - while(true) { - getline(in, line); - if (!in) break; + while(getline(in, line)) { ++lc; if (lc % 1000 == 0) { cerr << '.'; flag = true; } if (lc %50000 == 0) { cerr << " [" << lc << "]\n" << flush; flag = false; } ParseTranslatorInput(line, &ssrc, &strg); - Lattice src, trg; LatticeTools::ConvertTextToLattice(ssrc, &src); LatticeTools::ConvertTextToLattice(strg, &trg); if (src.size() == 0 || trg.size() == 0) { @@ -88,37 +95,54 @@ int main(int argc, char** argv) { } if (src.size() > unnormed_a_i.size()) unnormed_a_i.resize(src.size()); - if (iter == 0) - tot_len_ratio += static_cast(trg.size()) / static_cast(src.size()); + for (unsigned i = 0; i < trg.size(); ++i) { + assert(trg[i].size() == 1); + vocab_e.insert(trg[i][0].label); + } + } + } + if (flag) cerr << endl; + + // do optimization + for (int iter = 0; iter < ITERATIONS; ++iter) { + cerr << "ITERATION " << (iter + 1) << endl; + ReadFile rf(fname); + istream& in = *rf.stream(); + double likelihood = 0; + double denom = 0.0; + lc = 0; + flag = false; + while(true) { + getline(in, line); + if (!in) break; + ++lc; + if (lc % 1000 == 0) { cerr << '.'; flag = true; } + if (lc %50000 == 0) { cerr << " [" << lc << "]\n" << flush; flag = false; } + ParseTranslatorInput(line, &ssrc, &strg); + LatticeTools::ConvertTextToLattice(ssrc, &src); + LatticeTools::ConvertTextToLattice(strg, &trg); denom += trg.size(); vector probs(src.size() + 1); - bool first_al = true; // used for write_alignments for (int j = 0; j < trg.size(); ++j) { const WordID& f_j = trg[j][0].label; double sum = 0; const double j_over_ts = double(j) / trg.size(); - double prob_a_i = 1.0 / src.size(); double az = 0; for (int ta = 0; ta < src.size(); ++ta) { unnormed_a_i[ta] = exp(-fabs(double(ta) / src.size() - j_over_ts) * diagonal_tension); az += unnormed_a_i[ta]; } for (int i = 1; i <= src.size(); ++i) { - prob_a_i = unnormed_a_i[i-1] / az; + const double prob_a_i = unnormed_a_i[i-1] / az; + // TODO probs[i] = 1; // tt.prob(src[i-1][0].label, f_j) * prob_a_i; sum += probs[i]; } } } - - // log(e) = 1.0 - double base2_likelihood = likelihood / log(2); - if (flag) { cerr << endl; } - if (iter == 0) { - mean_srclen_multiplier = tot_len_ratio / lc; - cerr << "expected target length = source length * " << mean_srclen_multiplier << endl; - } + + const double base2_likelihood = likelihood / log(2); cerr << " log_e likelihood: " << likelihood << endl; cerr << " log_2 likelihood: " << base2_likelihood << endl; cerr << " cross entropy: " << (-base2_likelihood / denom) << endl; -- cgit v1.2.3 From 9e45f895aaec5c7a2f362aa532ca5ca4325e102b Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Tue, 21 Feb 2012 11:53:01 -0500 Subject: basic lbl model, nothing to see here --- training/lbl_model.cc | 147 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 126 insertions(+), 21 deletions(-) (limited to 'training') diff --git a/training/lbl_model.cc b/training/lbl_model.cc index ccd29255..4759eedc 100644 --- a/training/lbl_model.cc +++ b/training/lbl_model.cc @@ -5,13 +5,18 @@ int main() { std::cerr << "Please rebuild with --with-eigen PATH\n"; return 1; } #else +#include +#include #include #include +#include // memset +#include #include #include #include +#include "array2d.h" 
#include "m.h" #include "lattice.h" #include "stringlib.h" @@ -21,7 +26,7 @@ namespace po = boost::program_options; using namespace std; -#define kDIMENSIONS 10 +#define kDIMENSIONS 25 typedef Eigen::Matrix RVector; typedef Eigen::Matrix RTVector; typedef Eigen::Matrix TMatrix; @@ -32,6 +37,8 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { opts.add_options() ("input,i",po::value(),"Input file") ("iterations,I",po::value()->default_value(1000),"Number of iterations of training") + ("eta,e", po::value()->default_value(0.1f), "Eta for SGD") + ("random_seed", po::value(), "Random seed") ("diagonal_tension,T", po::value()->default_value(4.0), "How sharp or flat around the diagonal is the alignment distribution (0 = uniform, >0 sharpens)") ("testset,x", po::value(), "After training completes, compute the log likelihood of this set of sentence pairs under the learned model"); po::options_description clo("Command line options"); @@ -57,12 +64,19 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { return true; } +void Normalize(RVector* v) { + float norm = v->norm(); + *v /= norm; +} + int main(int argc, char** argv) { po::variables_map conf; if (!InitCommandLine(argc, argv, &conf)) return 1; const string fname = conf["input"].as(); const int ITERATIONS = conf["iterations"].as(); + const float eta = conf["eta"].as(); const double diagonal_tension = conf["diagonal_tension"].as(); + bool SGD = true; if (diagonal_tension < 0.0) { cerr << "Invalid value for diagonal_tension: must be >= 0\n"; return 1; @@ -70,14 +84,15 @@ int main(int argc, char** argv) { string testset; if (conf.count("testset")) testset = conf["testset"].as(); - int lc = 0; + unsigned lc = 0; vector unnormed_a_i; string line; string ssrc, strg; bool flag = false; Lattice src, trg; - set vocab_e; + vector vocab_e; { // read through corpus, initialize int map, check lines are good + set svocab_e; cerr << "INITIAL READ OF " << fname << endl; ReadFile rf(fname); istream& in = *rf.stream(); @@ -97,13 +112,39 @@ int main(int argc, char** argv) { unnormed_a_i.resize(src.size()); for (unsigned i = 0; i < trg.size(); ++i) { assert(trg[i].size() == 1); - vocab_e.insert(trg[i][0].label); + svocab_e.insert(trg[i][0].label); } } + copy(svocab_e.begin(), svocab_e.end(), back_inserter(vocab_e)); } if (flag) cerr << endl; + cerr << "Number of target word types: " << vocab_e.size() << endl; + const float num_examples = lc; + + r_trg.resize(TD::NumWords() + 1); + r_src.resize(TD::NumWords() + 1); + if (conf.count("random_seed")) { + srand(conf["random_seed"].as()); + } else { + unsigned seed = time(NULL); + cerr << "Random seed: " << seed << endl; + srand(seed); + } + TMatrix t = TMatrix::Random() / 100.0; + for (unsigned i = 1; i < r_trg.size(); ++i) { + r_trg[i] = RVector::Random(); + r_src[i] = RVector::Random(); + r_trg[i][i % kDIMENSIONS] = 0.5; + r_src[i][(i-1) % kDIMENSIONS] = 0.5; + Normalize(&r_trg[i]); + Normalize(&r_src[i]); + } + vector > trg_pos(TD::NumWords() + 1); // do optimization + TMatrix g; + vector exp_src; + vector z_src; for (int iter = 0; iter < ITERATIONS; ++iter) { cerr << "ITERATION " << (iter + 1) << endl; ReadFile rf(fname); @@ -112,9 +153,8 @@ int main(int argc, char** argv) { double denom = 0.0; lc = 0; flag = false; - while(true) { - getline(in, line); - if (!in) break; + g *= 0; + while(getline(in, line)) { ++lc; if (lc % 1000 == 0) { cerr << '.'; flag = true; } if (lc %50000 == 0) { cerr << " [" << lc << "]\n" << flush; flag = false; } @@ -122,23 +162,86 @@ int main(int 
argc, char** argv) { LatticeTools::ConvertTextToLattice(ssrc, &src); LatticeTools::ConvertTextToLattice(strg, &trg); denom += trg.size(); - vector probs(src.size() + 1); - for (int j = 0; j < trg.size(); ++j) { - const WordID& f_j = trg[j][0].label; - double sum = 0; - const double j_over_ts = double(j) / trg.size(); - double az = 0; - for (int ta = 0; ta < src.size(); ++ta) { - unnormed_a_i[ta] = exp(-fabs(double(ta) / src.size() - j_over_ts) * diagonal_tension); - az += unnormed_a_i[ta]; + + exp_src.clear(); exp_src.resize(src.size(), TMatrix::Zero()); + z_src.clear(); z_src.resize(src.size(), 0.0); + Array2D exp_refs(src.size(), trg.size(), TMatrix::Zero()); + Array2D z_refs(src.size(), trg.size(), 0.0); + for (unsigned j = 0; j < trg.size(); ++j) + trg_pos[trg[j][0].label].insert(j); + + for (unsigned i = 0; i < src.size(); ++i) { + const RVector& r_s = r_src[src[i][0].label]; + const RTVector pred = r_s.transpose() * t; + TMatrix& exp_m = exp_src[i]; + double& z = z_src[i]; + for (unsigned k = 0; k < vocab_e.size(); ++k) { + const WordID v_k = vocab_e[k]; + const RVector& r_t = r_trg[v_k]; + const double dot_prod = pred * r_t; + const double u = exp(dot_prod); + z += u; + const TMatrix v = r_s * r_t.transpose() * u; + exp_m += v; + set& ref_locs = trg_pos[v_k]; + if (!ref_locs.empty()) { + for (set::iterator it = ref_locs.begin(); it != ref_locs.end(); ++it) { + TMatrix& exp_ref_ij = exp_refs(i, *it); + double& z_ref_ij = z_refs(i, *it); + z_ref_ij += u; + exp_ref_ij += v; + } + } + } + } + for (unsigned j = 0; j < trg.size(); ++j) + trg_pos[trg[j][0].label].clear(); + + // model expectations for a single target generation with + // uniform alignment prior + double m_z = 0; + TMatrix m_exp = TMatrix::Zero(); + for (unsigned i = 0; i < src.size(); ++i) { + m_exp += exp_src[i]; + m_z += z_src[i]; + } + m_exp /= m_z; + + Array2D al(src.size(), trg.size(), false); + for (unsigned j = 0; j < trg.size(); ++j) { + double ref_z = 0; + TMatrix ref_exp = TMatrix::Zero(); + int max_i = 0; + double max_s = -9999999; + for (unsigned i = 0; i < src.size(); ++i) { + ref_exp += exp_refs(i, j); + ref_z += z_refs(i, j); + if (log(z_refs(i, j)) > max_s) { + max_s = log(z_refs(i, j)); + max_i = i; + } + // TODO handle alignment prob + } + if (ref_z <= 0) { + cerr << "TRG=" << TD::Convert(trg[j][0].label) << endl; + cerr << " LINE=" << line << endl; + cerr << " REF_EXP=\n" << ref_exp << endl; + cerr << " M_EXP=\n" << m_exp << endl; + abort(); } - for (int i = 1; i <= src.size(); ++i) { - const double prob_a_i = unnormed_a_i[i-1] / az; - // TODO - probs[i] = 1; // tt.prob(src[i-1][0].label, f_j) * prob_a_i; - sum += probs[i]; + al(max_i, j) = true; + ref_exp /= ref_z; + g += m_exp - ref_exp; + likelihood += log(ref_z) - log(m_z); + if (SGD) { + t -= g * eta / num_examples; + g *= 0; + } else { + assert(!"not implemented"); } } + + if (iter == (ITERATIONS - 1) || lc == 28) { cerr << al << endl; } } if (flag) { cerr << endl; } @@ -147,7 +250,9 @@ int main(int argc, char** argv) { cerr << " log_2 likelihood: " << base2_likelihood << endl; cerr << " cross entropy: " << (-base2_likelihood / denom) << endl; cerr << " perplexity: " << pow(2.0, -base2_likelihood / denom) << endl; + cerr << t << endl; } + cerr << "TRANSLATION MATRIX:" << endl << t << endl; return 0; } -- cgit v1.2.3 From c0e9dc2889b6beb039c5365ebd0af6486b7ec574 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Tue, 21 Feb 2012 17:51:44 -0500 Subject: use lbfgs --- training/Makefile.am | 2 +- training/lbl_model.cc | 33 
++++++++++++++++++++++++++++----- 2 files changed, 29 insertions(+), 6 deletions(-) (limited to 'training') diff --git a/training/Makefile.am b/training/Makefile.am index 330341ac..991ac210 100644 --- a/training/Makefile.am +++ b/training/Makefile.am @@ -50,7 +50,7 @@ test_ngram_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteva model1_SOURCES = model1.cc ttables.cc model1_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz -lbl_model_SOURCES = lbl_model.cc ttables.cc +lbl_model_SOURCES = lbl_model.cc optimize.cc lbl_model_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz grammar_convert_SOURCES = grammar_convert.cc diff --git a/training/lbl_model.cc b/training/lbl_model.cc index 4759eedc..eb3e194d 100644 --- a/training/lbl_model.cc +++ b/training/lbl_model.cc @@ -16,6 +16,7 @@ #include #include +#include "optimize.h" #include "array2d.h" #include "m.h" #include "lattice.h" @@ -26,7 +27,7 @@ namespace po = boost::program_options; using namespace std; -#define kDIMENSIONS 25 +#define kDIMENSIONS 8 typedef Eigen::Matrix RVector; typedef Eigen::Matrix RTVector; typedef Eigen::Matrix TMatrix; @@ -69,6 +70,21 @@ void Normalize(RVector* v) { *v /= norm; } +void Flatten(const TMatrix& m, vector* v) { + unsigned c = 0; + v->resize(kDIMENSIONS * kDIMENSIONS); + for (unsigned i = 0; i < kDIMENSIONS; ++i) + for (unsigned j = 0; j < kDIMENSIONS; ++j) + (*v)[c++] = m(i,j); +} + +void Unflatten(const vector& v, TMatrix* m) { + unsigned c = 0; + for (unsigned i = 0; i < kDIMENSIONS; ++i) + for (unsigned j = 0; j < kDIMENSIONS; ++j) + (*m)(i, j) = v[c++]; +} + int main(int argc, char** argv) { po::variables_map conf; if (!InitCommandLine(argc, argv, &conf)) return 1; @@ -76,7 +92,7 @@ int main(int argc, char** argv) { const int ITERATIONS = conf["iterations"].as(); const float eta = conf["eta"].as(); const double diagonal_tension = conf["diagonal_tension"].as(); - bool SGD = true; + bool SGD = false; if (diagonal_tension < 0.0) { cerr << "Invalid value for diagonal_tension: must be >= 0\n"; return 1; @@ -121,6 +137,7 @@ int main(int argc, char** argv) { cerr << "Number of target word types: " << vocab_e.size() << endl; const float num_examples = lc; + LBFGSOptimizer lbfgs(kDIMENSIONS * kDIMENSIONS, 100); r_trg.resize(TD::NumWords() + 1); r_src.resize(TD::NumWords() + 1); if (conf.count("random_seed")) { @@ -130,7 +147,7 @@ int main(int argc, char** argv) { cerr << "Random seed: " << seed << endl; srand(seed); } - TMatrix t = TMatrix::Random() / 100.0; + TMatrix t = TMatrix::Random() / 1024.0; for (unsigned i = 1; i < r_trg.size(); ++i) { r_trg[i] = RVector::Random(); r_src[i] = RVector::Random(); @@ -145,6 +162,8 @@ int main(int argc, char** argv) { TMatrix g; vector exp_src; vector z_src; + vector flat_g, flat_t; + Flatten(t, &flat_t); for (int iter = 0; iter < ITERATIONS; ++iter) { cerr << "ITERATION " << (iter + 1) << endl; ReadFile rf(fname); @@ -236,8 +255,6 @@ int main(int argc, char** argv) { if (SGD) { t -= g * eta / num_examples; g *= 0; - } else { - assert(!"not implemented"); } } @@ -250,6 +267,12 @@ int main(int argc, char** argv) { cerr << " log_2 likelihood: " << base2_likelihood << endl; cerr << " cross entropy: " << (-base2_likelihood / denom) << endl; cerr << " perplexity: " << pow(2.0, -base2_likelihood / denom) << endl; + if (!SGD) { + Flatten(g, &flat_g); + lbfgs.Optimize(-likelihood, flat_g, &flat_t); + Unflatten(flat_t, &t); + if (lbfgs.HasConverged()) break; + } cerr << t << endl; } cerr << "TRANSLATION MATRIX:" << 
endl << t << endl; -- cgit v1.2.3 From dd16e83d4a593392465ee317c43ffc2c490add2e Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Wed, 22 Feb 2012 16:10:56 +0000 Subject: add regularization --- training/lbl_model.cc | 50 +++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 9 deletions(-) (limited to 'training') diff --git a/training/lbl_model.cc b/training/lbl_model.cc index eb3e194d..a114bba7 100644 --- a/training/lbl_model.cc +++ b/training/lbl_model.cc @@ -12,6 +12,7 @@ #include // memset #include +#include #include #include #include @@ -27,7 +28,7 @@ namespace po = boost::program_options; using namespace std; -#define kDIMENSIONS 8 +#define kDIMENSIONS 110 typedef Eigen::Matrix RVector; typedef Eigen::Matrix RTVector; typedef Eigen::Matrix TMatrix; @@ -38,8 +39,9 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { opts.add_options() ("input,i",po::value(),"Input file") ("iterations,I",po::value()->default_value(1000),"Number of iterations of training") + ("regularization_strength,C",po::value()->default_value(0.1),"L2 regularization strength (0 for no regularization)") ("eta,e", po::value()->default_value(0.1f), "Eta for SGD") - ("random_seed", po::value(), "Random seed") + ("random_seed,s", po::value(), "Random seed") ("diagonal_tension,T", po::value()->default_value(4.0), "How sharp or flat around the diagonal is the alignment distribution (0 = uniform, >0 sharpens)") ("testset,x", po::value(), "After training completes, compute the log likelihood of this set of sentence pairs under the learned model"); po::options_description clo("Command line options"); @@ -67,6 +69,7 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { void Normalize(RVector* v) { float norm = v->norm(); + assert(norm > 0.0f); *v /= norm; } @@ -74,21 +77,42 @@ void Flatten(const TMatrix& m, vector* v) { unsigned c = 0; v->resize(kDIMENSIONS * kDIMENSIONS); for (unsigned i = 0; i < kDIMENSIONS; ++i) - for (unsigned j = 0; j < kDIMENSIONS; ++j) + for (unsigned j = 0; j < kDIMENSIONS; ++j) { + assert(boost::math::isnormal(m(i, j))); (*v)[c++] = m(i,j); + } } void Unflatten(const vector& v, TMatrix* m) { unsigned c = 0; for (unsigned i = 0; i < kDIMENSIONS; ++i) - for (unsigned j = 0; j < kDIMENSIONS; ++j) + for (unsigned j = 0; j < kDIMENSIONS; ++j) { + assert(boost::math::isnormal(v[c])); (*m)(i, j) = v[c++]; + } +} + +double ApplyRegularization(const double C, + const vector& weights, + vector* g) { + assert(weights.size() == g->size()); + double reg = 0; + for (size_t i = 0; i < weights.size(); ++i) { + const double& w_i = weights[i]; + double& g_i = (*g)[i]; + reg += C * w_i * w_i; + g_i += 2 * C * w_i; + } + return reg; } int main(int argc, char** argv) { po::variables_map conf; if (!InitCommandLine(argc, argv, &conf)) return 1; const string fname = conf["input"].as(); + const float reg_strength = conf["regularization_strength"].as(); + const bool has_l2 = reg_strength; + assert(reg_strength >= 0.0f); const int ITERATIONS = conf["iterations"].as(); const float eta = conf["eta"].as(); const double diagonal_tension = conf["diagonal_tension"].as(); @@ -147,7 +171,7 @@ int main(int argc, char** argv) { cerr << "Random seed: " << seed << endl; srand(seed); } - TMatrix t = TMatrix::Random() / 1024.0; + TMatrix t = TMatrix::Random() / 50.0; for (unsigned i = 1; i < r_trg.size(); ++i) { r_trg[i] = RVector::Random(); r_src[i] = RVector::Random(); @@ -159,7 +183,7 @@ int main(int argc, char** argv) { vector > trg_pos(TD::NumWords() + 1); // do 
optimization - TMatrix g; + TMatrix g = TMatrix::Zero(); vector exp_src; vector z_src; vector flat_g, flat_t; @@ -265,11 +289,19 @@ int main(int argc, char** argv) { const double base2_likelihood = likelihood / log(2); cerr << " log_e likelihood: " << likelihood << endl; cerr << " log_2 likelihood: " << base2_likelihood << endl; - cerr << " cross entropy: " << (-base2_likelihood / denom) << endl; - cerr << " perplexity: " << pow(2.0, -base2_likelihood / denom) << endl; + cerr << " cross entropy: " << (-base2_likelihood / denom) << endl; + cerr << " perplexity: " << pow(2.0, -base2_likelihood / denom) << endl; if (!SGD) { Flatten(g, &flat_g); - lbfgs.Optimize(-likelihood, flat_g, &flat_t); + double obj = -likelihood; + if (has_l2) { + const double r = ApplyRegularization(reg_strength, + flat_t, + &flat_g); + obj += r; + cerr << " regularization: " << r << endl; + } + lbfgs.Optimize(obj, flat_g, &flat_t); Unflatten(flat_t, &t); if (lbfgs.HasConverged()) break; } -- cgit v1.2.3 From 2faca3e7b3b8e4eba6c036c635a5b23883e72337 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Fri, 24 Feb 2012 00:47:48 -0500 Subject: load embeddings from file --- training/lbl_model.cc | 69 ++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 63 insertions(+), 6 deletions(-) (limited to 'training') diff --git a/training/lbl_model.cc b/training/lbl_model.cc index a114bba7..2af848b5 100644 --- a/training/lbl_model.cc +++ b/training/lbl_model.cc @@ -28,7 +28,7 @@ namespace po = boost::program_options; using namespace std; -#define kDIMENSIONS 110 +#define kDIMENSIONS 100 typedef Eigen::Matrix RVector; typedef Eigen::Matrix RTVector; typedef Eigen::Matrix TMatrix; @@ -40,7 +40,9 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { ("input,i",po::value(),"Input file") ("iterations,I",po::value()->default_value(1000),"Number of iterations of training") ("regularization_strength,C",po::value()->default_value(0.1),"L2 regularization strength (0 for no regularization)") - ("eta,e", po::value()->default_value(0.1f), "Eta for SGD") + ("eta", po::value()->default_value(0.1f), "Eta for SGD") + ("source_embeddings,f", po::value(), "File containing source embeddings (if unset, random vectors will be used)") + ("target_embeddings,e", po::value(), "File containing target embeddings (if unset, random vectors will be used)") ("random_seed,s", po::value(), "Random seed") ("diagonal_tension,T", po::value()->default_value(4.0), "How sharp or flat around the diagonal is the alignment distribution (0 = uniform, >0 sharpens)") ("testset,x", po::value(), "After training completes, compute the log likelihood of this set of sentence pairs under the learned model"); @@ -106,6 +108,59 @@ double ApplyRegularization(const double C, return reg; } +void LoadEmbeddings(const string& filename, vector* pv) { + vector& v = *pv; + cerr << "Reading embeddings from " << filename << " ...\n"; + ReadFile rf(filename); + istream& in = *rf.stream(); + string line; + unsigned lc = 0; + while(getline(in, line)) { + ++lc; + size_t cur = line.find(' '); + if (cur == string::npos || cur == 0) { + cerr << "Parse error reading line " << lc << ":\n" << line << endl; + abort(); + } + WordID w = TD::Convert(line.substr(0, cur)); + if (w >= v.size()) continue; + RVector& curv = v[w]; + line[cur] = 0; + size_t start = cur + 1; + cur = start + 1; + size_t c = 0; + while(cur < line.size()) { + if (line[cur] == ' ') { + line[cur] = 0; + curv[c++] = strtod(&line[start], NULL); + start = cur + 1; + cur = start; + if (c == kDIMENSIONS) break; 
+ } + ++cur; + } + if (c < kDIMENSIONS && cur != start) { + if (cur < line.size()) line[cur] = 0; + curv[c++] = strtod(&line[start], NULL); + } + if (c != kDIMENSIONS) { + static bool first = true; + if (first) { + cerr << " read " << c << " dimensions from embedding file, but built with " << kDIMENSIONS << " (filling in with random values)\n"; + first = false; + } + for (; c < kDIMENSIONS; ++c) curv[c] = rand(); + } + if (c == kDIMENSIONS && cur != line.size()) { + static bool first = true; + if (first) { + cerr << " embedding file contains more dimensions than configured with, truncating.\n"; + first = false; + } + } + } +} + int main(int argc, char** argv) { po::variables_map conf; if (!InitCommandLine(argc, argv, &conf)) return 1; @@ -175,11 +230,11 @@ int main(int argc, char** argv) { for (unsigned i = 1; i < r_trg.size(); ++i) { r_trg[i] = RVector::Random(); r_src[i] = RVector::Random(); - r_trg[i][i % kDIMENSIONS] = 0.5; - r_src[i][(i-1) % kDIMENSIONS] = 0.5; - Normalize(&r_trg[i]); - Normalize(&r_src[i]); } + if (conf.count("source_embeddings")) + LoadEmbeddings(conf["source_embeddings"].as(), &r_src); + if (conf.count("target_embeddings")) + LoadEmbeddings(conf["target_embeddings"].as(), &r_trg); vector > trg_pos(TD::NumWords() + 1); // do optimization @@ -242,6 +297,8 @@ int main(int argc, char** argv) { // model expectations for a single target generation with // uniform alignment prior + // TODO: when using a non-uniform alignment, m_exp will be + // a function of j (below) double m_z = 0; TMatrix m_exp = TMatrix::Zero(); for (unsigned i = 0; i < src.size(); ++i) { -- cgit v1.2.3 From 54bcfb835232d190a5ab6f0bd825de8a50dae126 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Wed, 29 Feb 2012 01:12:40 -0500 Subject: cleanup, mpi-ify lblmodel --- training/lbl_model.cc | 179 +++++++------- utils/agenda.h | 140 ----------- utils/best.h | 32 --- utils/corpus_tools.cc | 62 +++++ utils/corpus_tools.h | 19 ++ utils/d_ary_heap.h | 568 --------------------------------------------- utils/ftoa.h | 403 -------------------------------- utils/int_or_pointer.h | 70 ------ utils/intern_pool.h | 158 ------------- utils/lvalue_pmap.h | 31 --- utils/max_plus.h | 201 ---------------- utils/maybe_update_bound.h | 17 -- utils/nan.h | 42 ---- utils/string_to.h | 314 ------------------------- 14 files changed, 178 insertions(+), 2058 deletions(-) delete mode 100644 utils/agenda.h delete mode 100644 utils/best.h create mode 100644 utils/corpus_tools.cc create mode 100644 utils/corpus_tools.h delete mode 100644 utils/d_ary_heap.h delete mode 100644 utils/ftoa.h delete mode 100644 utils/int_or_pointer.h delete mode 100644 utils/intern_pool.h delete mode 100644 utils/lvalue_pmap.h delete mode 100644 utils/max_plus.h delete mode 100644 utils/maybe_update_bound.h delete mode 100644 utils/nan.h delete mode 100644 utils/string_to.h (limited to 'training') diff --git a/training/lbl_model.cc b/training/lbl_model.cc index 2af848b5..def5075a 100644 --- a/training/lbl_model.cc +++ b/training/lbl_model.cc @@ -12,11 +12,17 @@ #include // memset #include +#ifdef HAVE_MPI +#include +#include +namespace mpi = boost::mpi; +#endif #include #include #include #include +#include "corpus_tools.h" #include "optimize.h" #include "array2d.h" #include "m.h" @@ -29,9 +35,9 @@ namespace po = boost::program_options; using namespace std; #define kDIMENSIONS 100 -typedef Eigen::Matrix RVector; -typedef Eigen::Matrix RTVector; -typedef Eigen::Matrix TMatrix; +typedef Eigen::Matrix RVector; +typedef Eigen::Matrix RTVector; +typedef 
Eigen::Matrix TMatrix; vector r_src, r_trg; bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { @@ -39,8 +45,8 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { opts.add_options() ("input,i",po::value(),"Input file") ("iterations,I",po::value()->default_value(1000),"Number of iterations of training") - ("regularization_strength,C",po::value()->default_value(0.1),"L2 regularization strength (0 for no regularization)") - ("eta", po::value()->default_value(0.1f), "Eta for SGD") + ("regularization_strength,C",po::value()->default_value(0.1),"L2 regularization strength (0 for no regularization)") + ("eta", po::value()->default_value(0.1f), "Eta for SGD") ("source_embeddings,f", po::value(), "File containing source embeddings (if unset, random vectors will be used)") ("target_embeddings,e", po::value(), "File containing target embeddings (if unset, random vectors will be used)") ("random_seed,s", po::value(), "Random seed") @@ -70,7 +76,7 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { } void Normalize(RVector* v) { - float norm = v->norm(); + double norm = v->norm(); assert(norm > 0.0f); *v /= norm; } @@ -80,7 +86,7 @@ void Flatten(const TMatrix& m, vector* v) { v->resize(kDIMENSIONS * kDIMENSIONS); for (unsigned i = 0; i < kDIMENSIONS; ++i) for (unsigned j = 0; j < kDIMENSIONS; ++j) { - assert(boost::math::isnormal(m(i, j))); + assert(boost::math::isfinite(m(i, j))); (*v)[c++] = m(i,j); } } @@ -89,7 +95,7 @@ void Unflatten(const vector& v, TMatrix* m) { unsigned c = 0; for (unsigned i = 0; i < kDIMENSIONS; ++i) for (unsigned j = 0; j < kDIMENSIONS; ++j) { - assert(boost::math::isnormal(v[c])); + assert(boost::math::isfinite(v[c])); (*m)(i, j) = v[c++]; } } @@ -162,14 +168,25 @@ void LoadEmbeddings(const string& filename, vector* pv) { } int main(int argc, char** argv) { +#ifdef HAVE_MPI + std::cerr << "**MPI enabled.\n"; + mpi::environment env(argc, argv); + mpi::communicator world; + const int size = world.size(); + const int rank = world.rank(); +#else + std::cerr << "**MPI disabled.\n"; + const int rank = 0; + const int size = 1; +#endif po::variables_map conf; if (!InitCommandLine(argc, argv, &conf)) return 1; const string fname = conf["input"].as(); - const float reg_strength = conf["regularization_strength"].as(); + const double reg_strength = conf["regularization_strength"].as(); const bool has_l2 = reg_strength; assert(reg_strength >= 0.0f); const int ITERATIONS = conf["iterations"].as(); - const float eta = conf["eta"].as(); + const double eta = conf["eta"].as(); const double diagonal_tension = conf["diagonal_tension"].as(); bool SGD = false; if (diagonal_tension < 0.0) { @@ -181,61 +198,44 @@ int main(int argc, char** argv) { unsigned lc = 0; vector unnormed_a_i; - string line; - string ssrc, strg; bool flag = false; - Lattice src, trg; + vector > srcs, trgs; vector vocab_e; - { // read through corpus, initialize int map, check lines are good - set svocab_e; - cerr << "INITIAL READ OF " << fname << endl; - ReadFile rf(fname); - istream& in = *rf.stream(); - while(getline(in, line)) { - ++lc; - if (lc % 1000 == 0) { cerr << '.'; flag = true; } - if (lc %50000 == 0) { cerr << " [" << lc << "]\n" << flush; flag = false; } - ParseTranslatorInput(line, &ssrc, &strg); - LatticeTools::ConvertTextToLattice(ssrc, &src); - LatticeTools::ConvertTextToLattice(strg, &trg); - if (src.size() == 0 || trg.size() == 0) { - cerr << "Error: " << lc << "\n" << line << endl; - assert(src.size() > 0); - assert(trg.size() > 0); - } - if 
(src.size() > unnormed_a_i.size()) - unnormed_a_i.resize(src.size()); - for (unsigned i = 0; i < trg.size(); ++i) { - assert(trg[i].size() == 1); - svocab_e.insert(trg[i][0].label); - } - } + { + set svocab_e, svocab_f; + CorpusTools::ReadFromFile(fname, &srcs, NULL, &trgs, &svocab_e, rank, size); copy(svocab_e.begin(), svocab_e.end(), back_inserter(vocab_e)); } - if (flag) cerr << endl; cerr << "Number of target word types: " << vocab_e.size() << endl; - const float num_examples = lc; + const double num_examples = lc; - LBFGSOptimizer lbfgs(kDIMENSIONS * kDIMENSIONS, 100); + boost::shared_ptr lbfgs; + if (rank == 0) + lbfgs.reset(new LBFGSOptimizer(kDIMENSIONS * kDIMENSIONS, 100)); r_trg.resize(TD::NumWords() + 1); r_src.resize(TD::NumWords() + 1); + vector > trg_pos(TD::NumWords() + 1); + if (conf.count("random_seed")) { srand(conf["random_seed"].as()); } else { - unsigned seed = time(NULL); + unsigned seed = time(NULL) + rank * 100; cerr << "Random seed: " << seed << endl; srand(seed); } - TMatrix t = TMatrix::Random() / 50.0; - for (unsigned i = 1; i < r_trg.size(); ++i) { - r_trg[i] = RVector::Random(); - r_src[i] = RVector::Random(); + + TMatrix t; + if (rank == 0) { + t = TMatrix::Random() / 50.0; + for (unsigned i = 1; i < r_trg.size(); ++i) { + r_trg[i] = RVector::Random(); + r_src[i] = RVector::Random(); + } + if (conf.count("source_embeddings")) + LoadEmbeddings(conf["source_embeddings"].as(), &r_src); + if (conf.count("target_embeddings")) + LoadEmbeddings(conf["target_embeddings"].as(), &r_trg); } - if (conf.count("source_embeddings")) - LoadEmbeddings(conf["source_embeddings"].as(), &r_src); - if (conf.count("target_embeddings")) - LoadEmbeddings(conf["target_embeddings"].as(), &r_trg); - vector > trg_pos(TD::NumWords() + 1); // do optimization TMatrix g = TMatrix::Zero(); @@ -243,22 +243,25 @@ int main(int argc, char** argv) { vector z_src; vector flat_g, flat_t; Flatten(t, &flat_t); - for (int iter = 0; iter < ITERATIONS; ++iter) { + bool converged = false; + // TODO broadcast embeddings + for (int iter = 0; !converged && iter < ITERATIONS; ++iter) { +#ifdef HAVE_MPI + mpi::broadcast(world, &flat_t[0], flat_t.size(), 0); +#endif + Unflatten(flat_t, &t); cerr << "ITERATION " << (iter + 1) << endl; - ReadFile rf(fname); - istream& in = *rf.stream(); double likelihood = 0; double denom = 0.0; lc = 0; flag = false; g *= 0; - while(getline(in, line)) { + for (unsigned i = 0; i < srcs.size(); ++i) { + const vector& src = srcs[i]; + const vector& trg = trgs[i]; ++lc; - if (lc % 1000 == 0) { cerr << '.'; flag = true; } - if (lc %50000 == 0) { cerr << " [" << lc << "]\n" << flush; flag = false; } - ParseTranslatorInput(line, &ssrc, &strg); - LatticeTools::ConvertTextToLattice(ssrc, &src); - LatticeTools::ConvertTextToLattice(strg, &trg); + if (rank == 0 && lc % 1000 == 0) { cerr << '.'; flag = true; } + if (rank == 0 && lc %50000 == 0) { cerr << " [" << lc << "]\n" << flush; flag = false; } denom += trg.size(); exp_src.clear(); exp_src.resize(src.size(), TMatrix::Zero()); @@ -266,10 +269,10 @@ int main(int argc, char** argv) { Array2D exp_refs(src.size(), trg.size(), TMatrix::Zero()); Array2D z_refs(src.size(), trg.size(), 0.0); for (unsigned j = 0; j < trg.size(); ++j) - trg_pos[trg[j][0].label].insert(j); + trg_pos[trg[j]].insert(j); for (unsigned i = 0; i < src.size(); ++i) { - const RVector& r_s = r_src[src[i][0].label]; + const RVector& r_s = r_src[src[i]]; const RTVector pred = r_s.transpose() * t; TMatrix& exp_m = exp_src[i]; double& z = z_src[i]; @@ -293,7 +296,7 @@ int 
main(int argc, char** argv) { } } for (unsigned j = 0; j < trg.size(); ++j) - trg_pos[trg[j][0].label].clear(); + trg_pos[trg[j]].clear(); // model expectations for a single target generation with // uniform alignment prior @@ -323,8 +326,8 @@ int main(int argc, char** argv) { // TODO handle alignment prob } if (ref_z <= 0) { - cerr << "TRG=" << TD::Convert(trg[j][0].label) << endl; - cerr << " LINE=" << line << endl; + cerr << "TRG=" << TD::Convert(trg[j]) << endl; + cerr << " LINE=" << lc << " (RANK=" << rank << "/" << size << ")" << endl; cerr << " REF_EXP=\n" << ref_exp << endl; cerr << " M_EXP=\n" << m_exp << endl; abort(); @@ -339,30 +342,42 @@ int main(int argc, char** argv) { } } - if (iter == (ITERATIONS - 1) || lc == 28) { cerr << al << endl; } + if (rank == 0 && (iter == (ITERATIONS - 1) || lc < 12)) { cerr << al << endl; } } - if (flag) { cerr << endl; } + if (flag && rank == 0) { cerr << endl; } - const double base2_likelihood = likelihood / log(2); - cerr << " log_e likelihood: " << likelihood << endl; - cerr << " log_2 likelihood: " << base2_likelihood << endl; - cerr << " cross entropy: " << (-base2_likelihood / denom) << endl; - cerr << " perplexity: " << pow(2.0, -base2_likelihood / denom) << endl; + double obj = 0; if (!SGD) { Flatten(g, &flat_g); - double obj = -likelihood; - if (has_l2) { - const double r = ApplyRegularization(reg_strength, - flat_t, - &flat_g); - obj += r; - cerr << " regularization: " << r << endl; + obj = -likelihood; + // TODO - reduce gradient + } + + if (rank == 0) { + double gn = 0; + for (unsigned i = 0; i < flat_g.size(); ++i) + gn += flat_g[i]*flat_g[i]; + const double base2_likelihood = likelihood / log(2); + cerr << " log_e likelihood: " << likelihood << endl; + cerr << " log_2 likelihood: " << base2_likelihood << endl; + cerr << " cross entropy: " << (-base2_likelihood / denom) << endl; + cerr << " perplexity: " << pow(2.0, -base2_likelihood / denom) << endl; + cerr << " gradient norm: " << sqrt(gn) << endl; + if (!SGD) { + if (has_l2) { + const double r = ApplyRegularization(reg_strength, + flat_t, + &flat_g); + obj += r; + cerr << " regularization: " << r << endl; + } + lbfgs->Optimize(obj, flat_g, &flat_t); + converged = (lbfgs->HasConverged()); } - lbfgs.Optimize(obj, flat_g, &flat_t); - Unflatten(flat_t, &t); - if (lbfgs.HasConverged()) break; } - cerr << t << endl; +#ifdef HAVE_MPI + mpi::broadcast(world, converged, 0); +#endif } cerr << "TRANSLATION MATRIX:" << endl << t << endl; return 0; diff --git a/utils/agenda.h b/utils/agenda.h deleted file mode 100644 index d4f13696..00000000 --- a/utils/agenda.h +++ /dev/null @@ -1,140 +0,0 @@ -#ifndef AGENDA_H -#define AGENDA_H - -#define DBG_AGENDA(x) x -/* - a priority queue where you expect to queue the same item at different - priorities several times before finally popping it. higher priority = better. - so in best first you'd be using negative cost or e^-cost (probabilities, in - other words). - - this means you have a way to look up a key and see its location in the queue, - so its priority can be adjusted (or, simpler implementation: so when you pop, - you see if you've already popped before at a lower cost, and skip the - subsequent pops). - - it's assumed that you'll never queue an item @ a better priority after it has - already been popped. that is, the agenda will track already completed items. 
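The "simpler implementation" mentioned here is plain lazy deletion: allow duplicate pushes into an ordinary max-priority queue and skip any pop whose key has already been finished. A hypothetical standalone sketch of that variant, using std::priority_queue instead of the indexed d-ary heap this header builds on:

#include <queue>
#include <set>
#include <utility>

// Illustrative sketch only; names are hypothetical.
typedef std::pair<double, int> Entry;  // (priority, key); larger priority = better

// Pop the best not-yet-finished key, discarding stale duplicates along the way.
// Returns false once the queue is exhausted.
bool PopBest(std::priority_queue<Entry>* q, std::set<int>* done, Entry* out) {
  while (!q->empty()) {
    const Entry top = q->top();
    q->pop();
    if (done->insert(top.second).second) {  // first time this key is popped
      *out = top;
      return true;
    }
    // otherwise this is a worse, stale copy of an already-finished key
  }
  return false;
}

Re-queueing a key at a better priority is always safe under this scheme; only its best copy survives the done check, at the cost of leaving stale entries in the heap.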
- maybe in the future i will let you recompute a cheaper way to reach things - after first-pop also, it's assumed that we're always improving prios of - existing items, never making them worse (even though technically this is - possible and sensible if it hasn't been popped yet). - - simple binary max heap for now. there are better practical options w/ - superior cache locaility. movements in the heap need to update a record for - that key of where the key went. i do this by creating canonical key pointers - out of boost object pools (if the key were lightweight e.g. an int, then it - would make sense to use the hash lookup too - - since i'm doing key hashing to start with, i also allow you to attach some - arbitrary data (value) payload beyond key+priority. - - hash map from key to done (has been popped) -> set where doneness is marked in key item? - - a slightly different way to make an adjustable heap would be to use - tree-structured parent/children links intrusively (or mapped by key) in the - key, rather than indices in a compact binary-tree heap - - */ - -#include "best.h" -#include "intern_pool.h" -#include "d_ary_heap.h" -#include "lvalue_pmap.h" -#include -#include - -/* -template -struct priority_traits { - typedef typename P::priority_type priority_type; -}; -*/ - -typedef best_t agenda_best_t; -typedef unsigned agenda_location_t; - -PMAP_MEMBER_INDIRECT(LocationMap,agenda_location_t,location) -PMAP_MEMBER_INDIRECT(PriorityMap,agenda_best_t,priority) - -struct Less { - typedef bool result_type; - template - bool operator()(A const& a,B const& b) const { return a,class HashKey=boost::hash,class EqKey=std::equal_to, class Pool=boost::object_pool > -struct Agenda : intern_pool { - typedef intern_pool Intern; // inherited because I want to use construct() - /* this is less generic than it could be, because I want to use a single hash mapping to intern to canonical mutable object pointers, where the property maps are just lvalue accessors */ - typedef typename KeyF::result_type Key; - typedef Item * Handle; - typedef LocationMap LocMap; - typedef PriorityMap PrioMap; - LocMap locmap; - PrioMap priomap; // note: priomap[item] is set by caller before giving us the item; then tracks best (for canonicalized item) thereafter - - Better better; - //NOT NEEDED: initialize function object state (there is none) - - typedef Item *ItemC; //canonicalized pointer - typedef Item *ItemP; - static const std::size_t heap_arity=4; // might be fastest possible (depends on key size probably - cache locality is bad w/ arity=2) - typedef std::vector HeapStorage; - typedef d_ary_heap_indirect Heap; - Heap q; - - // please don't call q.push etc. directly. - void add(ItemP i) { - bool fresh=interneq(i); - DBG_AGENDA(assert(fresh && !q.contains(i))); - q.push(i); - } - bool improve(ItemP i) { - ItemP c=i; - bool fresh=interneq(c); - if (fresh) { - add(c); - return true; - } - DBG_AGENDA(assert(q.contains(c))); - return q.maybe_improve(priomap[i]); - } - inline bool empty() { - return q.empty(); - } - // no need to destroy the canon. item because we want to remember the best cost and reject more expensive ways of using it). - ItemC pop() { - ItemC r=q.top(); - q.pop(); - return r; - } - void pop_discard() { - q.pop(); - } - - ItemC top() { - DBG_AGENDA(assert(!empty())); - return q.top(); - } - - agenda_best_t best() const { - return q.best(); //TODO: cache/track the global best? 
- } - - agenda_best_t second_best() const { - return q.second_best(); - } - - // add only if worse than queue current best, otherwise evaluate immediately (e.g. for early stopping w/ expensive to compute additional cost). return true if postponed (added) - bool postpone(ItemP i) { - if (better(priomap[i],best())) return false; - return improve(i); - } - - Agenda(unsigned reserve=1000000,LocMap const& lm=LocMap(),PrioMap const& pm=PrioMap(),EqKey const& eq=EqKey(),Better const& better=Better()) : locmap(lm), priomap(pm), better(better), q(priomap,locmap,better,reserve) { } -}; - -#endif diff --git a/utils/best.h b/utils/best.h deleted file mode 100644 index ed15e0be..00000000 --- a/utils/best.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef UTILS__BEST_H -#define UTILS__BEST_H - -#include "max_plus.h" - -typedef MaxPlus best_t; - -inline bool better(best_t const& a,best_t const& b) { - return a.v_>b.v_; // intentionally reversed, so default min-heap, sort, etc. put best first. -} - -inline bool operator <(best_t const& a,best_t const& b) { - return a.v_>b.v_; // intentionally reversed, so default min-heap, sort, etc. put best first. -} -struct BetterP { - inline bool operator ()(best_t const& a,best_t const& b) const { - return a.v_>b.v_; // intentionally reversed, so default min-heap, sort, etc. put best first. - } -}; - -inline void maybe_improve(best_t &a,best_t const& b) { - if (a.v_>b.v_) - a.v_=b.v_; -} - -template -inline void maybe_improve(best_t &a,O const& b) { - if (a.v_>b.v_) - a.v_=b.v_; -} - -#endif diff --git a/utils/corpus_tools.cc b/utils/corpus_tools.cc new file mode 100644 index 00000000..a0542b6e --- /dev/null +++ b/utils/corpus_tools.cc @@ -0,0 +1,62 @@ +#include "corpus_tools.h" + +#include + +#include "tdict.h" +#include "filelib.h" +#include "verbose.h" + +using namespace std; + +void CorpusTools::ReadFromFile(const string& filename, + vector >* src, + set* src_vocab, + vector >* trg, + set* trg_vocab, + int rank, + int size) { + assert(rank >= 0); + assert(size > 0); + assert(rank < size); + if (src) src->clear(); + if (src_vocab) src_vocab->clear(); + if (trg) trg->clear(); + if (trg_vocab) trg_vocab->clear(); + const int expected_fields = 1 + (trg == NULL ? 
0 : 1); + if (!SILENT) cerr << "Reading from " << filename << " ...\n"; + ReadFile rf(filename); + istream& in = *rf.stream(); + string line; + int lc = 0; + static const WordID kDIV = TD::Convert("|||"); + vector tmp; + while(getline(in, line)) { + const bool skip = (lc % size != rank); + ++lc; + if (skip) continue; + TD::ConvertSentence(line, &tmp); + src->push_back(vector()); + vector* d = &src->back(); + set* v = src_vocab; + int s = 0; + for (unsigned i = 0; i < tmp.size(); ++i) { + if (tmp[i] == kDIV) { + ++s; + if (s > 1) { cerr << "Unexpected format in line " << lc << ": " << line << endl; abort(); } + assert(trg); + trg->push_back(vector()); + d = &trg->back(); + v = trg_vocab; + } else { + d->push_back(tmp[i]); + if (v) v->insert(tmp[i]); + } + } + ++s; + if (expected_fields != s) { + cerr << "Wrong number of fields in line " << lc << ": " << line << endl; abort(); + } + } +} + + diff --git a/utils/corpus_tools.h b/utils/corpus_tools.h new file mode 100644 index 00000000..97bdaa94 --- /dev/null +++ b/utils/corpus_tools.h @@ -0,0 +1,19 @@ +#ifndef _CORPUS_TOOLS_H_ +#define _CORPUS_TOOLS_H_ + +#include +#include +#include +#include "wordid.h" + +struct CorpusTools { + static void ReadFromFile(const std::string& filename, + std::vector >* src, + std::set* src_vocab = NULL, + std::vector >* trg = NULL, + std::set* trg_vocab = NULL, + int rank = 0, + int size = 1); +}; + +#endif diff --git a/utils/d_ary_heap.h b/utils/d_ary_heap.h deleted file mode 100644 index 1270638a..00000000 --- a/utils/d_ary_heap.h +++ /dev/null @@ -1,568 +0,0 @@ -#ifndef D_ARY_HEAP_H -#define D_ARY_HEAP_H - -#include "show.h" -#define DDARY(x) - -#define D_ARY_PUSH_GRAEHL 0 // untested -#define D_ARY_POP_GRAEHL 0 // untested -#define D_ARY_DOWN_GRAEHL 0 // untested -#define D_ARY_UP_GRAEHL 0 // untested -#define D_ARY_APPEND_ALWAYS_PUSH 1 // heapify (0) is untested. otherwise switch between push and heapify depending on size (cache effects, existing items vs. # appended ones) - -#define D_ARY_TRACK_OUT_OF_HEAP 0 // shouldn't need to track, because in contains() false positives looking up stale or random loc map values are impossible - we just check key. note: if you enable this, you must init location to D_ARY_HEAP_NULL_INDEX yourself until it's been added or popped -#define D_ARY_VERIFY_HEAP 1 -// This is a very expensive test so it should be disabled even when NDEBUG is not defined - -# undef D_ARY_HEAP_NULL_INDEX -# define D_ARY_HEAP_NULL_INDEX (-1) // you may init location to this. - -/* adapted from boost/graph/detail/d_ary_heap.hpp - - local modifications: - - clear, heapify, append range/container, Size type template arg, reserve constructor arg - - hole+move rather than swap. note: swap would be more efficient for heavyweight keys, until move ctors exist - - don't set locmap to -1 when removing from heap (waste of time) - - // unlike arity=2 case, you don't gain anything by having indices start at 1, with 0-based child indices - // root @1, A=2, children indices m={0,1}: parent(i)=i/2, child(i,m)=2*i+m - // root @0: parent(i)=(i-1)/A child(i,n)=i*A+n+1 - can't improve on this except child(i,m)=i*A+m - (integer division, a/b=floor(a/b), so (i-1)/A = ceil(i/A)-1, or greatest int less than (i/A)) - - actually, no need to adjust child index, since child is called only once and inline - - e.g. for A=3 gorn address in tree -> index - - () = root -> 0 - (1) -> 1 - (2) -> 2 - (3) (A) -> 3 - (1,1) -> (1*A+1) = 4 - (1,2) -> (1*A+2) = 5 - (1,3) -> (1*A+3) = 6 - (2,1) -> (2*A+1) = 7 - etc. 
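Those parent/child formulas are easy to sanity-check; a small hypothetical test of the 0-based layout with parent(i) = (i-1)/A and child(i,m) = i*A + m + 1, using the A = 3 examples from the comment:

#include <cassert>
#include <cstddef>

// Illustrative sketch only.
static const std::size_t A = 3;

inline std::size_t Parent(std::size_t i) { return (i - 1) / A; }
inline std::size_t Child(std::size_t i, std::size_t m) { return i * A + m + 1; }  // m is 0-based

int main() {
  // the gorn-address table above (its child slots are written 1-based)
  assert(Child(0, 0) == 1 && Child(0, 1) == 2 && Child(0, 2) == 3);
  assert(Child(1, 0) == 4 && Child(1, 2) == 6);  // (1,1) -> 4, (1,3) -> 6
  assert(Child(2, 0) == 7);                      // (2,1) -> 7
  // Parent undoes Child for every slot of every node
  for (std::size_t i = 0; i < 100; ++i)
    for (std::size_t m = 0; m < A; ++m)
      assert(Parent(Child(i, m)) == i);
  return 0;
}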
- -//TODO: block-align siblings! assume data[0] is 16 or 32-byte aligned ... then we want root @ index (blocksize-1). see http://www.lamarca.org/anthony/pubs/heaps.pdf pg8. for pow2(e.g. 4)-ary heap, it may be reasonable to use root @index A-1. however, suppose the key size is not padded to a power of 2 (e.g. 12 bytes), then we would need internal gaps at times. would want to use compile const template based inlineable alignment math for this? possibly use a container like vector that lets you specify padding relative to some address multiple for v[0]. - - optimal D: see http://www.lamarca.org/anthony/pubs/heaps.pdf pg 9. depedns on relative cost of swap,compare, but in all cases except swap=free, 2 is worse than 3-4. for expensive swap (3x compare), 4 still as good as 5. so just use 4. boost benchmarking djikstra agrees; 4 is best. - - cache-aligned 4-heap speedup over regular 2-heap is 10-80% (for huge heaps, the speedup is more) - - splay/skew heaps are worse than 2heap or aligned 4heap in practice. - - //TODO: switch from heapify (Floyd's method) to repeated push past some size limit (in bytes) due to cache effect - - #define D_ARY_BYTES_OUT_OF_CACHE 0x1000000 - - //TODO: assuming locmap is an lvalue pmap, we can be more efficient. on the other hand, if it's an intrusive property map to an interned mutable object, there's no difference in performance, and that's what i'm going to do in my first uses. plus, if keys are indices and the map is a vector, it's barely any overhead. - - */ - -// -//======================================================================= -// Copyright 2009 Trustees of Indiana University -// Authors: Jeremiah J. Willcock, Andrew Lumsdaine -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -// - -#include -#include -#include -#include -#include -#include -#include -#include - - - // D-ary heap using an indirect compare operator (use identity_property_map - // as DistanceMap to get a direct compare operator). This heap appears to be - // commonly used for Dijkstra's algorithm for its good practical performance - // on some platforms; asymptotically, it's not optimal; it has an O(lg N) decrease-key - // operation, which is (amortized) constant time on a relaxed heap or fibonacci heap. The - // implementation is mostly based on the binary heap page on Wikipedia and - // online sources that state that the operations are the same for d-ary - // heaps. This code is not based on the old Boost d-ary heap code. - // - // - d_ary_heap_indirect is a model of UpdatableQueue as is needed for - // dijkstra_shortest_paths. - // - // - Value must model Assignable. - // - Arity must be at least 2 (optimal value appears to be 4, both in my and - // third-party experiments). - // - IndexInHeapMap must be a ReadWritePropertyMap from Value to - // Container::size_type (to store the index of each stored value within the - // heap for decrease-key aka update). - // - DistanceMap must be a ReadablePropertyMap from Value to something - // (typedef'ed as distance_type). - // - Compare must be a BinaryPredicate used as a less-than operator on - // distance_type. - // - Container must be a random-access, contiguous container (in practice, - // the operations used probably require that it is std::vector). 
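A rough cost model behind the "optimal D ... just use 4" advice above, as a hypothetical back-of-the-envelope rather than anything from the header: a sift-down touches about log_d(n) levels and does up to d comparisons per level, so raising the arity trades more comparisons per level for fewer levels (and fewer moves).

#include <cmath>
#include <cstdio>

// Illustrative sketch only.
int main() {
  const double n = 1e6;  // heap size
  for (int d = 2; d <= 6; ++d) {
    const double levels = std::log(n) / std::log(static_cast<double>(d));
    std::printf("arity %d: ~%4.1f levels (moves), ~%5.1f comparisons per sift-down\n",
                d, levels, d * levels);
  }
  return 0;
}

At arity 4 the comparison count matches the binary heap while the number of levels, and hence moves and likely cache misses, is roughly halved, which is why 4 wins whenever swaps are not free.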
- // - template , - typename Container = std::vector, - typename Size = typename Container::size_type, - typename Equal = std::equal_to > - class d_ary_heap_indirect { - BOOST_STATIC_ASSERT (Arity >= 2); - public: - typedef Container container_type; - typedef Size size_type; - typedef Value value_type; - typedef typename Container::const_iterator const_iterator; - typedef const_iterator iterator; - // The distances being compared using better and that are stored in the - // distance map - typedef typename boost::property_traits::value_type distance_type; - d_ary_heap_indirect(DistanceMap const& distance, - IndexInHeapPropertyMap const& index_in_heap, - const Better& better = Better(), - size_type container_reserve = 100000, - Equal const& equal = Equal() - ) - : better(better), data(), distance(distance), - index_in_heap(index_in_heap),equal(equal) { - data.reserve(container_reserve); - } - /* Implicit copy constructor */ - /* Implicit assignment operator */ - - template - void append_heapify(C const& c) { - data.reserve(data.size()+c.size()); - append_heapify(c.begin(),c.end()); - } - - template - void append_heapify(I begin,I end) { - data.insert(data.end(),begin,end); - heapify(); - } - - template - void append_push(C const& c) { - data.reserve(data.size()+c.size()); - append_push(c.begin(),c.end()); - } - - // past some threshold, this should be faster than append_heapify. also, if there are many existing elements it will be faster. - template - void append_push(I begin,I end) { - for (;begin!=end;++begin) - push(*begin); - } - - template - void append(C const& c) { - if (D_ARY_APPEND_ALWAYS_PUSH || data.size()>=c.size()/2) - append_push(c); - else - append_heapify(c); - } - - // past some threshold, this should be faster than append_heapify. also, if there are many existing elements it will be faster. - template - void append(I begin,I end) { - if (D_ARY_APPEND_ALWAYS_PUSH || data.size()>=0x10000) - append_push(begin,end); - else - append_heapify(begin,end); - } - - // could allow mutation of data directly, e.g. push_back 1 at a time - but then they could forget to heapify() - - //from bottom of heap tree up, turn that subtree into a heap by adjusting the root down - // for n=size, array elements indexed by floor(n/2) + 1, floor(n/2) + 2, ... , n are all leaves for the tree, thus each is an one-element heap already - // warning: this is many fewer instructions but, at some point (when heap doesn't fit in Lx cache) it will become slower than repeated push(). 
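For reference, the bottom-up construction described in the comment above is Floyd's O(n) heapify; a hypothetical standalone version for a plain binary min-heap over std::vector<int> (heapify() below does the same thing generically, with arity Arity and the indirect comparator):

#include <algorithm>
#include <vector>

// Illustrative sketch only.
void BinaryHeapify(std::vector<int>* heap) {
  std::vector<int>& a = *heap;
  const int n = static_cast<int>(a.size());
  for (int i = n / 2 - 1; i >= 0; --i) {        // last non-leaf down to the root
    int j = i;
    for (;;) {                                  // sift a[j] down
      int c = 2 * j + 1;                        // left child
      if (c >= n) break;
      if (c + 1 < n && a[c + 1] < a[c]) ++c;    // pick the smaller child
      if (a[j] <= a[c]) break;                  // heap property holds here
      std::swap(a[j], a[c]);
      j = c;
    }
  }
}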
- void heapify() { - for (size_type i=parent(data.size()-1);i>0;--i) // starting from parent of last node, ending at first child of root (i==1) - preserve_heap_property_down(i); - } - - void reserve(size_type s) { - data.reserve(s); - } - - size_type size() const { - return data.size(); - } - - bool empty() const { - return data.empty(); - } - - const_iterator begin() const { - return data.begin(); - } - - const_iterator end() const { - return data.end(); - } - - void clear() { -#if D_ARY_TRACK_OUT_OF_HEAP - using boost::put; - for (typename Container::iterator i=data.begin(),e=data.end();i!=e;++i) - put(index_in_heap,*i,(size_type)D_ARY_HEAP_NULL_INDEX); -#endif - data.clear(); - } - - void push(const Value& v) { - if (D_ARY_PUSH_GRAEHL) { - size_type i = data.size(); - data.push_back(Value()); // (hoping default construct is cheap, construct-copy inline) - preserve_heap_property_up(v,i); // we don't have to recopy v, or init index_in_heap - } else { - size_type index = data.size(); - data.push_back(v); - using boost::put; - put(index_in_heap, v, index); - preserve_heap_property_up(index); - } - verify_heap(); - } - - Value& top() { - return data[0]; - } - - const Value& top() const { - return data[0]; - } - - void pop() { - using boost::put; - if(D_ARY_TRACK_OUT_OF_HEAP) - put(index_in_heap, data[0], (size_type)D_ARY_HEAP_NULL_INDEX); - if (data.size() != 1) { - if (D_ARY_POP_GRAEHL) { - preserve_heap_property_down(data.back(),0,data.size()-1); - data.pop_back(); - } else { - data[0] = data.back(); - put(index_in_heap, data[0], 0); - data.pop_back(); - preserve_heap_property_down(); - } - verify_heap(); - } else { - data.pop_back(); - } - } - - // This function assumes the key has been improved - // (distance has become smaller, so it may need to rise toward top(). - // i.e. decrease-key in a min-heap - void update(const Value& v) { - using boost::get; - size_type index = get(index_in_heap, v); - preserve_heap_property_up(v,index); - verify_heap(); - } - - // return true if improved. - bool maybe_improve(const Value& v,distance_type dbetter) { - using boost::get; - if (better(dbetter,get(distance,v))) { - preserve_heap_property_up_dist(v,dbetter); - return true; - } - return false; - } - - distance_type best(distance_type null=0) const { - return empty() ? null : get(distance,data[0]); - } - distance_type second_best(distance_type null=0) const { - if (data.size()<2) return null; - int m=std::min(data.size(),Arity+1); -// if (m>=Arity) m=Arity+1; - distance_type b=get(distance,data[1]); - for (int i=2;i=0 && i=0 check to catch uninit. 
data - } -#include "warning_pop.h" - - inline bool contains(const Value& v) const { - using boost::get; - return contains(v,get(index_in_heap, v)); - } - - void push_or_update(const Value& v) { /* insert if not present, else update */ - using boost::get; - size_type index = get(index_in_heap, v); - if (D_ARY_PUSH_GRAEHL) { - if (contains(v,index)) - preserve_heap_property_up(v,index); - else - push(v); - } else { - if (!contains(v,index)) { - index = data.size(); - data.push_back(v); - using boost::put; - put(index_in_heap, v, index); - } - preserve_heap_property_up(index); - } - verify_heap(); - } - - private: - Better better; - Container data; - DistanceMap distance; - IndexInHeapPropertyMap index_in_heap; - Equal equal; - - // Get the parent of a given node in the heap - static inline size_type parent(size_type index) { - return (index - 1) / Arity; - } - - // Get the child_idx'th child of a given node; 0 <= child_idx < Arity - static inline size_type child(size_type index, std::size_t child_idx) { - return index * Arity + child_idx + 1; - } - - // Swap two elements in the heap by index, updating index_in_heap - inline void swap_heap_elements(size_type index_a, size_type index_b) { - using std::swap; - Value value_a = data[index_a]; - Value value_b = data[index_b]; - data[index_a] = value_b; - data[index_b] = value_a; - using boost::put; - put(index_in_heap, value_a, index_b); - put(index_in_heap, value_b, index_a); - } - - inline void move_heap_element(Value const& v,size_type ito) { - using boost::put; - put(index_in_heap,v,ito); - data[ito]=v; //todo: move assign? - } - - // Verify that the array forms a heap; commented out by default - void verify_heap() const { - // This is a very expensive test so it should be disabled even when - // NDEBUG is not defined -#if D_ARY_VERIFY_HEAP - using boost::get; - for (size_t i = 1; i < data.size(); ++i) { - if (better(get(distance,data[i]), get(distance,data[parent(i)]))) { - assert (!"Element is smaller than its parent"); - } - } -#endif - } - - // we have a copy of the key, so we don't need to do that stupid find # of levels to move then move. 
we act as though data[index]=currently_being_moved, but in fact it's an uninitialized "hole", which we fill at the very end - inline void preserve_heap_property_up(Value const& currently_being_moved,size_type index) { - using boost::get; - preserve_heap_property_up(currently_being_moved,index,get(distance,currently_being_moved)); - } - - inline void preserve_heap_property_up_set_dist(Value const& currently_being_moved,distance_type dbetter) { - using boost::get; - using boost::put; - put(distance,currently_being_moved,dbetter); - preserve_heap_property_up(currently_being_moved,get(index_in_heap,currently_being_moved),dbetter); - verify_heap(); - } - - void preserve_heap_property_up(Value const& currently_being_moved,size_type index,distance_type currently_being_moved_dist) { - using boost::put; - using boost::get; - if (D_ARY_UP_GRAEHL) { - for (;;) { - if (index == 0) break; // Stop at root - size_type parent_index = parent(index); - Value const& parent_value = data[parent_index]; - if (better(currently_being_moved_dist, get(distance, parent_value))) { - move_heap_element(parent_value,index); - index = parent_index; - } else { - break; // Heap property satisfied - } - } - //finish "swap chain" by filling hole w/ currently_being_moved - move_heap_element(currently_being_moved,index); // note: it's ok not to return early on index==0 at start, even if self-assignment isn't supported by Value - because currently_being_moved is a copy. - } else { - put(index_in_heap,currently_being_moved,index); - put(distance,currently_being_moved,currently_being_moved_dist); - preserve_heap_property_up(index); - } - } - - // Starting at a node, move up the tree swapping elements to preserve the - // heap property. doesn't actually use swap; uses hole - void preserve_heap_property_up(size_type index) { - using boost::get; - if (index == 0) return; // Do nothing on root - if (D_ARY_UP_GRAEHL) { - Value copyi=data[index]; - preserve_heap_property_up(copyi,index); - return; - } - size_type orig_index = index; - size_type num_levels_moved = 0; - // The first loop just saves swaps that need to be done in order to avoid - // aliasing issues in its search; there is a second loop that does the - // necessary swap operations - Value currently_being_moved = data[index]; - distance_type currently_being_moved_dist = - get(distance, currently_being_moved); - for (;;) { - if (index == 0) break; // Stop at root - size_type parent_index = parent(index); - Value parent_value = data[parent_index]; - if (better(currently_being_moved_dist, get(distance, parent_value))) { - ++num_levels_moved; - index = parent_index; - continue; - } else { - break; // Heap property satisfied - } - } - // Actually do the moves -- move num_levels_moved elements down in the - // tree, then put currently_being_moved at the top - index = orig_index; - using boost::put; - for (size_type i = 0; i < num_levels_moved; ++i) { - size_type parent_index = parent(index); - Value parent_value = data[parent_index]; - put(index_in_heap, parent_value, index); - data[index] = parent_value; - index = parent_index; - } - data[index] = currently_being_moved; - put(index_in_heap, currently_being_moved, index); - verify_heap(); - } - - - // From the root, swap elements (each one with its smallest child) if there - // are any parent-child pairs that violate the heap property. v is placed at data[i], but then pushed down (note: data[i] won't be read explicitly; it will instead be overwritten by percolation). 
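The hole technique just described replaces each swap with a single move: keep the value being inserted in a local, slide ancestors down into the vacated slot, and write the value once at its final position. A hypothetical binary min-heap push written that way:

#include <cstddef>
#include <vector>

// Illustrative sketch only.
void PushWithHole(std::vector<int>* heap, int v) {
  std::vector<int>& a = *heap;
  std::size_t i = a.size();
  a.push_back(v);                        // grow; slot i is treated as the "hole"
  while (i > 0) {
    const std::size_t p = (i - 1) / 2;   // binary-heap parent
    if (!(v < a[p])) break;              // stop once the parent is no worse
    a[i] = a[p];                         // slide the parent down into the hole
    i = p;
  }
  a[i] = v;                              // single final write
}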
this also means that v must be a copy of data[i] if it was already at i. - // e.g. v=data.back(), i=0, sz=data.size()-1 for pop(), implicitly swapping data[i], data.back(), and doing data.pop_back(), then adjusting from 0 down w/ swaps. updates index_in_heap for v. - inline void preserve_heap_property_down(Value const& currently_being_moved,size_type i,size_type heap_size) { - using boost::get; - distance_type currently_being_moved_dist=get(distance,currently_being_moved); - Value* data_ptr = &data[0]; - size_type index = 0; // hole at index - currently_being_moved to be put here when we find the final hole spot - for (;;) { - size_type first_child_index = child(index, 0); - if (first_child_index >= heap_size) break; /* No children */ - Value* child_base_ptr = data_ptr + first_child_index; // using index of first_child_index+smallest_child_index because we hope optimizer will be smart enough to const-unroll a loop below if we do this. i think the optimizer would have gotten it even without our help (i.e. store root-relative index) - - // begin find best child index/distance - size_type smallest_child_index = 0; // don't add to base first_child_index every time we update which is smallest. - distance_type smallest_child_dist = get(distance, child_base_ptr[smallest_child_index]); -#undef D_ARY_MAYBE_IMPROVE_CHILD_I -#define D_ARY_MAYBE_IMPROVE_CHILD_I \ - distance_type i_dist = get(distance, child_base_ptr[i]); \ - if (better(i_dist, smallest_child_dist)) { \ - smallest_child_index = i; \ - smallest_child_dist = i_dist; \ - } - if (first_child_index + Arity <= heap_size) { - // avoid repeated heap_size boundcheck (should test if this is really a speedup - instruction cache tradeoff - could use upperbound = min(Arity,heap_size-first_child_index) instead. but this optimizes to a fixed number of iterations (compile time known) so probably worth it - for (size_t i = 1; i < Arity; ++i) { - D_ARY_MAYBE_IMPROVE_CHILD_I - } - } else { - for (size_t i = 1,e=heap_size - first_child_index; i < e; ++i) { - D_ARY_MAYBE_IMPROVE_CHILD_I - } - } - //end: know best child - - if (better(smallest_child_dist, currently_being_moved_dist)) { - // instead of swapping, move. - move_heap_element(child_base_ptr[smallest_child_index],index); // move up - index=first_child_index+smallest_child_index; // descend - hole is now here - } else { - move_heap_element(currently_being_moved,index); // finish "swap chain" by filling hole - break; - } - } - verify_heap(); - } - - inline void preserve_heap_property_down(size_type i) { - preserve_heap_property_down(data[i],i,data.size()); - } - - void preserve_heap_property_down() { - using boost::get; - if (data.empty()) return; - if (D_ARY_DOWN_GRAEHL) { // this *should* be more efficient because i avoid swaps. - Value copy0=data[0]; - preserve_heap_property_down(copy0,0,data.size()); - return; - } - size_type index = 0; - Value currently_being_moved = data[0]; - distance_type currently_being_moved_dist = - get(distance, currently_being_moved); - size_type heap_size = data.size(); - Value* data_ptr = &data[0]; - for (;;) { - size_type first_child_index = child(index, 0); - if (first_child_index >= heap_size) break; /* No children */ - Value* child_base_ptr = data_ptr + first_child_index; - size_type smallest_child_index = 0; - distance_type smallest_child_dist = get(distance, child_base_ptr[smallest_child_index]); - if (first_child_index + Arity <= heap_size) { - for (size_t i = 1; i < Arity; ++i) { // can be unrolled completely. 
- - D_ARY_MAYBE_IMPROVE_CHILD_I - } - } else { - for (size_t i = 1,e=heap_size - first_child_index; i < e; ++i) { - D_ARY_MAYBE_IMPROVE_CHILD_I - } - } - if (better(smallest_child_dist, currently_being_moved_dist)) { - swap_heap_elements(smallest_child_index + first_child_index, index); - index = smallest_child_index + first_child_index; - continue; - } else { - break; // Heap property satisfied - } - } - verify_heap(); - } - - }; - -#endif diff --git a/utils/ftoa.h b/utils/ftoa.h deleted file mode 100644 index 3dba528d..00000000 --- a/utils/ftoa.h +++ /dev/null @@ -1,403 +0,0 @@ -#ifndef FTOA_H -#define FTOA_H - - -//TODO: for fractional digits/non-sci, determine the right amount of left padding (more if the whole number is indeed <1, to keep the significant digits), less if sci notation and/or mantissa has sig. digits (don't want N before . and N after!) - -#ifndef FTOA_ROUNDTRIP -# define FTOA_ROUNDTRIP 1 -#endif - -#ifndef FTOA_DEBUG -# define FTOA_DEBUG 0 -#endif - -#ifndef FTOA_USE_SPRINTF -#define FTOA_USE_SPRINTF 0 -#endif - -#if FTOA_DEBUG -# define FTOAassert(x) assert(x) -# define DBFTOA(x) std::cerr<<"\nFTOA " <<__func__<<"("<<__LINE__<<"): " #x "="< -#include -#include -#include -#include -#include -#include "utoa.h" -#include "nan.h" - -template -struct ftoa_traits { -}; - -//eP10, -// sigd decimal places normally printed, roundtripd needed so that round-trip float->string->float is identity - -#define DEFINE_FTOA_TRAITS(FLOATT,INTT,sigd,roundtripd,small,large,used,P10) \ -template <> \ -struct ftoa_traits { \ - typedef INTT int_t; \ - typedef u ## INTT uint_t; \ - typedef FLOATT float_t; \ - enum { digits10=std::numeric_limits::digits10, chars_block=P10, usedig=used, sigdig=sigd, roundtripdig=roundtripd, bufsize=roundtripdig+7 }; \ - static const double pow10_block = 1e ## P10; \ - static const float_t small_f = small; \ - static const float_t large_f = large; \ - static inline int sprintf(char *buf,double f) { return std::sprintf(buf,"%." #used "g",f); } \ - static inline int sprintf_sci(char *buf,double f) { return std::sprintf(buf,"%." #used "e",f); } \ - static inline int sprintf_nonsci(char *buf,double f) { return std::sprintf(buf,"%." #used "f",f); } \ - static inline uint_t fracblock(double frac) { FTOAassert(frac>=0 && frac<1); double f=frac*pow10_block;uint_t i=(uint_t)f;FTOAassert(i=0 && frac<1); double f=frac*pow10_block;uint_t i=(uint_t)(f+.5);FTOAassert(ilarge; } \ - static inline bool use_sci(float_t f) { return use_sci_abs(std::fabs(f)); } \ -}; -//TODO: decide on computations in double (would hurt long double) or in native float type - any advantage? more precision is usually better. - -//10^22 = 0x1.0f0cf064dd592p73 is the largest exactly representable power of 10 in the binary64 format. but round down to 18 so int64_t can hold it. - -#if FTOA_ROUNDTRIP -#define DEFINE_FTOA_TRAITS_ROUNDTRIP(FLOATT,INTT,sigd,roundtripd,small,large) DEFINE_FTOA_TRAITS(FLOATT,INTT,sigd,roundtripd,small,large,roundtripd,roundtripd) -#else -#define DEFINE_FTOA_TRAITS_ROUNDTRIP(FLOATT,INTT,sigd,roundtripd,small,large) DEFINE_FTOA_TRAITS(FLOATT,INTT,sigd,roundtripd,small,large,sigd,sigd) -#endif - -DEFINE_FTOA_TRAITS_ROUNDTRIP(double,int64_t,15,17,1e-5,1e8) -//i've heard that 1e10 is fine for float. but we only have 1e9 (9 decimal places) in int32. 
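Aside on the sigd/roundtripd pairs above (15/17 for double, and 6/9 for float on the entry that follows): these match the printf precisions conventionally needed for an exact text round-trip, the same rule string_to.h applies further down with "%.9g"/"%.17g". A minimal sketch of that property, assuming IEEE-754 float/double and independent of the deleted header:

#include <cassert>
#include <cstdio>
#include <cstdlib>

int main() {
  const double d = 1.0 / 3.0;   // not exactly representable in binary
  const float  f = 1.0f / 3.0f;
  char buf[64];
  std::sprintf(buf, "%.17g", d);                         // 17 significant digits round-trip a double
  assert(std::strtod(buf, 0) == d);
  std::sprintf(buf, "%.9g", f);                          // 9 significant digits round-trip a float
  assert(static_cast<float>(std::strtod(buf, 0)) == f);
  return 0;
}
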
-DEFINE_FTOA_TRAITS_ROUNDTRIP(float,int32_t,6,9,1e-3,1e8) - - -template -inline void ftoa_error(F f,char const* msg="") { - using namespace std; - cerr<<"ftoa error: "< -char *prepend_pos_frac_digits(char *p,F f) { - FTOAassert(f<1 && f >0); - typedef ftoa_traits FT; - //repeat if very small??? nah, require sci notation to take care of it. - typename FT::uint_t i=FT::rounded_fracblock(f); - DBFTOA2(f,i); - if (i>0) { - unsigned n_skipped; - char *d=utoa_drop_trailing_0(p,i,n_skipped); - char *b=p-FT::chars_block+n_skipped; - FTOAassert(b<=d); - left_pad(b,d,'0'); - return b; - } else { - return p; - } -} - -template -char *append_pos_frac_digits(char *p,F f) { // '0' right-padded, nul terminated, return position of nul. [p,ret) are the digits - if (f==0) { - *p++='0'; - return p; - } - FTOAassert(f<1 && f >0); - typedef ftoa_traits FT; - //repeat if very small??? nah, require sci notation to take care of it. - typename FT::uint_t i=FT::rounded_fracblock(f); - DBFTOA2(f,i); - if (i>0) { - char *e=p+FT::chars_block; - utoa_left_pad(p,e,i,'0'); - *e=0; - return e; - } else { - *p=0; - return p; - } -} - -template -inline char *prepend_pos_frac(char *p,F f) { - FTOAassert(f<1 && f>=0); - if (f==0) { - *--p='0'; - return p; - } - p=prepend_pos_frac_digits(p,f); - *--p='.'; - if (DECIMAL_FOR_WHOLE>0) - *--p='0'; - return p; -} - -template -inline char *append_pos_frac(char *p,F f) { - DBFTOA(f); - if (DECIMAL_FOR_WHOLE>0) - *p++='0'; - *p++='.'; - return append_pos_frac_digits(p,f); -} - -template -inline char *prepend_frac(char *p,F f,bool positive_sign=false) { - FTOAassert(f<1 && f>-1); - if (f==0) - *--p='0'; - else if (f<0) { - p=prepend_pos_frac(p,-f); - *--p='-'; - } else { - p=prepend_pos_frac(p,f); - if (positive_sign) - *--p='+'; - } - return p; -} - - -template -inline char *append_sign(char *p,F f,bool positive_sign=false) { - if (f<0) { - *p++='-'; - } else if (positive_sign) - *p++='+'; - return p; -} - -template -inline char *append_frac(char *p,F f,bool positive_sign=false) { - FTOAassert(f<1 && f>-1); - if (f==0) { - *p++='0'; - return p; - } else if (f<0) { - *p++='-'; - return append_pos_frac(p,-f); - } - if (positive_sign) { - *p++='+'; - return append_pos_frac(p,f); - } - -} - - -//append_frac, append_pos_sci, append_sci. notice these are all composed according to a pattern (but reversing order of composition in pre vs app). or can implement with copy through buffer - -/* will switch to sci notation if integer part is too big for the int type. but for very small values, will simply display 0 (i.e. //TODO: find out log10 and leftpad 0s then convert rest) */ -template -char *prepend_pos_nonsci(char *p,F f) { - typedef ftoa_traits FT; - typedef typename FT::uint_t uint_t; - DBFTOA(f); - FTOAassert(f>0); - if (f>std::numeric_limits::max()) - return prepend_pos_sci(p,f); - //which is faster - modf is weird and returns negative frac part if f is negative. while we could deal with this using fabs, we instead only handle positive here (put - sign in front and negate, then call us) - ? 
-#if 0 - F intpart; - F frac=std::modf(f,&intpart); - uint_t u=intpart; -#else - uint_t u=f; - F frac=f-u; -#endif - DBFTOA2(u,frac); - if (frac == 0) { - if (DECIMAL_FOR_WHOLE>1) - *--p='.'; - } else { - p=prepend_pos_frac_digits(p,frac); - *--p='.'; - } - if (u==0) { - if (DECIMAL_FOR_WHOLE>0) - *--p='0'; - } else - p=utoa(p,u); - return p; -} - -// modify p; return true if handled -template -inline bool prepend_0_etc(char *&p,F f,bool positive_sign=false) { - if (f==0) { - *--p='0'; - return true; - } - if (is_nan(f)) { - p-=3; - p[0]='N';p[1]='A';p[2]='N'; - return true; - } - if (is_pos_inf(f)) { - p-=3; - p[0]='I';p[1]='N';p[2]='F'; - if (positive_sign) - *--p='+'; - return true; - } - if (is_neg_inf(f)) { - p-=4; - p[0]='-';p[1]='I';p[2]='N';p[3]='F'; - return true; - } - return false; -} - -template -inline char *prepend_nonsci(char *p,F f,bool positive_sign=false) { - if (prepend_0_etc(p,f,positive_sign)) return p; - if (f<0) { - p=prepend_pos_nonsci(p,-f); - *--p='-'; - } else { - p=prepend_pos_nonsci(p,f); - if (positive_sign) - *--p='+'; - } - return p; -} - -template -inline char *prepend_pos_sci(char *p,F f,bool positive_sign_exp=false) { - FTOAassert(f>0); - typedef ftoa_traits FT; - int e10; - F mant=FT::mantexp10(f,e10); - DBFTOA(f); - DBFTOA2(mant,e10); - FTOAassert(mant<10.00001); - if (mant>=10.) { - ++e10; - mant*=.1; - } else if (mant < 1.) { - --e10; - mant*=10; - } - p=itoa(p,e10,positive_sign_exp); - *--p='e'; - return prepend_pos_nonsci(p,mant); -} - -template -inline char *prepend_sci(char *p,F f,bool positive_sign_mant=false,bool positive_sign_exp=false) { - if (prepend_0_etc(p,f,positive_sign_mant)) return p; - if (f==0) - *--p='0'; - else if (f<0) { - p=prepend_pos_sci(p,-f,positive_sign_exp); - *--p='-'; - } else { - p=prepend_pos_sci(p,f,positive_sign_exp); - if (positive_sign_mant) - *--p='+'; - } - return p; -} - -template -inline char *append_nonsci(char *p,F f,bool positive_sign=false) { - if (positive_sign&&f>=0) *p++='+'; - return p+ftoa_traits::sprintf_nonsci(p,f); -} - -template -inline char *append_sci(char *p,F f,bool positive_sign=false) { - if (positive_sign&&f>=0) *p++='+'; - return p+ftoa_traits::sprintf_sci(p,f); -} - -template -inline char *append_ftoa(char *p,F f,bool positive_sign=false) { - if (positive_sign&&f>=0) *p++='+'; - return p+ftoa_traits::sprintf(p,f); -} - -template -inline char *prepend_ftoa(char *p,F f) -{ - typedef ftoa_traits FT; - return FT::use_sci(f) ? prepend_sci(p,f) : prepend_nonsci(p,f); -} - -template -inline std::string ftos_append(F f) { - typedef ftoa_traits FT; - char buf[FT::bufsize]; - return std::string(buf,append_ftoa(buf,f)); -} - -template -inline std::string ftos_prepend(F f) { - typedef ftoa_traits FT; - char buf[FT::bufsize]; - char *end=buf+FT::bufsize; - return std::string(prepend_ftoa(end,f),end); -} - - -template -inline std::string ftos(F f) { -#if 0 - // trust RVO? no extra copies? - return FTOA_USE_SPRINTF ? ftos_append(f) : ftos_prepend(f); -#else - typedef ftoa_traits FT; - char buf[FT::bufsize]; - if (FTOA_USE_SPRINTF) { - return std::string(buf,append_ftoa(buf,f)); - } else { - char *end=buf+FT::bufsize; - return std::string(prepend_ftoa(end,f),end); - } -#endif -} - -namespace { - const int ftoa_bufsize=30; - char ftoa_outbuf[ftoa_bufsize]; -} - -// not even THREADLOCAL - don't use. 
-inline char *static_ftoa(float f) -{ - if (FTOA_USE_SPRINTF) { - append_ftoa(ftoa_outbuf,f); - return ftoa_outbuf; - } else { - char *end=ftoa_outbuf+ftoa_bufsize; - return prepend_ftoa(end,f); - } -} - - -#endif diff --git a/utils/int_or_pointer.h b/utils/int_or_pointer.h deleted file mode 100644 index 4b6a9e4a..00000000 --- a/utils/int_or_pointer.h +++ /dev/null @@ -1,70 +0,0 @@ -#ifndef INT_OR_POINTER_H -#define INT_OR_POINTER_H - -// if you ever wanted to store a discriminated union of pointer/integer without an extra boolean flag, this will do it, assuming your pointers are never odd. - -// check lsb for expected tag? -#ifndef IOP_CHECK_LSB -# define IOP_CHECK_LSB 1 -#endif -#if IOP_CHECK_LSB -# define iop_assert(x) assert(x) -#else -# define iop_assert(x) -#endif - -#include -#include - -template -struct IntOrPointer { - typedef Pointed pointed_type; - typedef Int integer_type; - typedef Pointed *value_type; - typedef IntOrPointer self_type; - IntOrPointer(int j) { *this=j; } - IntOrPointer(size_t j) { *this=j; } - IntOrPointer(value_type v) { *this=v; } - bool is_integer() const { return i&1; } - bool is_pointer() const { return !(i&1); } - value_type & pointer() { return p; } - const value_type & pointer() const { iop_assert(is_pointer()); return p; } - integer_type integer() const { iop_assert(is_integer()); return i >> 1; } - void set_integer(Int j) { i=2*j+1; } - void set_pointer(value_type p_) { p=p_;iop_assert(is_pointer()); } - void operator=(unsigned j) { i = 2*(integer_type)j+1; } - void operator=(int j) { i = 2*(integer_type)j+1; } - template - void operator=(C j) { i = 2*(integer_type)j+1; } - void operator=(value_type v) { p=v; } - IntOrPointer() {} - IntOrPointer(const self_type &s) : p(s.p) {} - void operator=(const self_type &s) { p=s.p; } - template - bool operator ==(C* v) const { return p==v; } - template - bool operator ==(const C* v) const { return p==v; } - template - bool operator ==(C j) const { return integer() == j; } - bool operator ==(self_type s) const { return p==s.p; } - bool operator !=(self_type s) const { return p!=s.p; } - template void print(O&o) const - { - if (is_integer()) - o << integer(); - else { - o << "0x" << std::hex << (size_t)pointer() << std::dec; - } - } - friend inline std::ostream& operator<<(std::ostream &o,self_type const& s) { - s.print(o); return o; - } -protected: - union { - value_type p; // must be even (guaranteed unless you're pointing at packed chars) - integer_type i; // stored as 2*data+1, so only has half the range (one less bit) of a normal integer_type - }; -}; - - -#endif diff --git a/utils/intern_pool.h b/utils/intern_pool.h deleted file mode 100644 index 7c739add..00000000 --- a/utils/intern_pool.h +++ /dev/null @@ -1,158 +0,0 @@ -#ifndef INTERN_POOL_H -#define INTERN_POOL_H - -#define DEBUG_INTERN_POOL(x) x - -/* to "intern" a string in lisp is to make a symbol from it (a pointer to a canonical copy whose pointer can be equality-compared/hashed directly with other interned things). 
we take an Item that has a key part and some mutable parts (that aren't in its identity), and we hash-by-value the key part to map to a canonical on-heap Item - and we use a boost object pool to allocate them */ - -//FIXME: actually store function object state (assumed stateless so far) - -#include -#include "hash.h" -//#include "null_traits.h" -#include - -template -struct get_key { // default accessor for I = like pair - typedef typename I::first_type const& result_type; - typedef I const& argument_type; - result_type operator()(I const& i) const { - return i.first; - } -}; - -// Arg type should be the non-pointer version. this saves me from using boost type traits to remove_pointer. f may be binary or unary -template -struct compose_indirect { - typedef Arg *argument_type; // we also accept Arg & - KeyF kf; - F f; - typedef typename F::result_type result_type; - result_type operator()(Arg const& p) const { - return f(kf(p)); - } - result_type operator()(Arg & p) const { - return f(kf(p)); - } - result_type operator()(Arg * p) const { - return f(kf(*p)); - } - template - result_type operator()(V const& v) const { - return f(kf(*v)); - } - - result_type operator()(Arg const& a1,Arg const& a2) const { - return f(kf(a1),kf(a2)); - } - result_type operator()(Arg & a1,Arg & a2) const { - return f(kf(a1),kf(a2)); - } - result_type operator()(Arg * a1,Arg * a2) const { - return f(kf(*a1),kf(*a2)); - } - template - result_type operator()(V const& v,W const&w) const { - return f(kf(*v),kf(*w)); - } - - -}; - -template -struct equal_indirect { - typedef Arg *argument_type; // we also accept Arg & - KeyF kf; - F f; - typedef bool result_type; - - result_type operator()(Arg const& a1,Arg const& a2) const { - return f(kf(a1),kf(a2)); - } - result_type operator()(Arg & a1,Arg & a2) const { - return f(kf(a1),kf(a2)); - } - result_type operator()(Arg * a1,Arg * a2) const { - return a1==a2||(a1&&a2&&f(kf(*a1),kf(*a2))); - } - template - result_type operator()(V const& v,W const&w) const { - return v==w||(v&&w&&f(kf(*v),kf(*w))); - } - - -}; - -/* - -template -struct indirect_function { - F f; - explicit indirect_function(F const& f=F()) : f(f) {} - typedef typename F::result_type result_type; - template - result_type operator()(V *p) const { - return f(*p); - } -}; -*/ - -template ,class HashKey=boost::hash,class EqKey=std::equal_to, class Pool=boost::object_pool > -struct intern_pool : Pool { - KeyF key; - typedef typename KeyF::result_type Key; - typedef Item *Handle; - typedef compose_indirect HashDeep; - typedef equal_indirect EqDeep; - typedef HASH_SET Canonical; - typedef typename Canonical::iterator CFind; - typedef std::pair CInsert; - Canonical canonical; - bool interneq(Handle &i) { // returns true if i is newly interned, false if it already existed - CInsert i_new=canonical.insert(i); - i=*i_new.first; - return i_new.second; - } -// inherited: Handle construct(...) - Handle construct_fresh() { return Pool::construct(); } - Handle intern(Handle i) { // (maybe invalidating i, returning a valid canonical handle (pointer) - CInsert i_new=canonical.insert(i); - if (i_new.second) - return i; - else { - free(i); - return *i_new->first; - } - } - void destroy_interned(Handle i) { - DEBUG_INTERN_POOL(assert(canonical.find(i)!=canonical.end())); - canonical.erase(i); - destroy(i); - } - bool destroy_fresh(Handle i) { - DEBUG_INTERN_POOL(assert(canonical.find(i)!=canonical.end()||*canonical.find(i)!=i)); // i is a constructed item not yet interned. 
- destroy(i); - } - void destroy_both(Handle i) { // i must have come from this pool. may be interned, or not. destroy both the noninterned and interned. - if (!destroy_if_interned(i)) destroy(i); - } - // destroy intern(i) if it exists. return true if it existed AND its address was i. otherwise return false (whether or not a value-equal item existed and was destroyed) - bool destroy_if_interned(Handle i) { - CFind f=canonical.find(i); - if (f!=canonical.end()) { - Handle interned=*f; - canonical.erase(f); - destroy(f); - if (f==i) return true; - } - return false; - } - - intern_pool() { - HASH_MAP_EMPTY(canonical,(Handle)0); - } -}; - - - -#endif diff --git a/utils/lvalue_pmap.h b/utils/lvalue_pmap.h deleted file mode 100644 index 5b9403c0..00000000 --- a/utils/lvalue_pmap.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef LVALUE_PMAP_H -#define LVALUE_PMAP_H - -#include - -// i checked: boost provides get and put given [] - but it's not being found by ADL so instead i define them myself - -// lvalue property map pmapname
<P>
that is: P p; valtype &v=p->name; -#define PMAP_MEMBER_INDIRECT(pmapname,valtype,name) template struct pmapname { \ - typedef P key_type; \ - typedef valtype value_type; \ - typedef value_type & reference; \ - typedef boost::lvalue_property_map_tag category; \ - reference operator[](key_type p) const { return p->name; } \ - typedef pmapname
<P>
self_type; \ - friend inline value_type const& get(self_type const&,key_type p) { return p->name; } \ - friend inline void put(self_type &,key_type p,value_type const& v) { p->name = v; } \ -}; - -#define PMAP_MEMBER_INDIRECT_2(pmapname,name) template struct pmapname { \ - typedef P key_type; \ - typedef R value_type; \ - typedef value_type & reference; \ - typedef boost::lvalue_property_map_tag category; \ - reference operator[](key_type p) const { return p->name; } \ - typedef pmapname self_type; \ - friend inline value_type const& get(self_type const&,key_type p) { return p->name; } \ - friend inline void put(self_type &,key_type p,value_type const& v) { p->name = v; } \ -}; - -#endif diff --git a/utils/max_plus.h b/utils/max_plus.h deleted file mode 100644 index 2e56f85e..00000000 --- a/utils/max_plus.h +++ /dev/null @@ -1,201 +0,0 @@ -#ifndef MAX_PLUS_H_ -#define MAX_PLUS_H_ - -#define MAX_PLUS_ORDER 0 -#define MAX_PLUS_DEBUG(x) - -// max-plus algebra. ordering a > b really means that (i.e. default a > around -// x+y := max{x,y} -// x*y := x+y -// 0 := -inf -// 1 := 0 -// additive inverse does not, but mult. does. (inverse()) and x/y := x-y = x+y.inverse() -//WARNING: default order is reversed, on purpose, i.e. alog(p_b). sorry. defaults in libs are to order ascending, but we want best first. - -#include -#include -#include -#include -#include -#include -#include "semiring.h" -#include "show.h" -//#include "logval.h" - -template -class MaxPlus { - public: - void print(std::ostream &o) const { - o<) - template - void operator=(O const& o) { - v_=o.v_; - } - template - MaxPlus(O const& o) : v_(o.v_) { } - - typedef MaxPlus Self; - MaxPlus() : v_(LOGVAL_LOG0) {} - explicit MaxPlus(double x) : v_(std::log(x)) {} - MaxPlus(init_1) : v_(0) { } - MaxPlus(init_0) : v_(LOGVAL_LOG0) { } - MaxPlus(int x) : v_(std::log(x)) {} - MaxPlus(unsigned x) : v_(std::log(x)) { } - MaxPlus(double lnx,bool sign) : v_(lnx) { MAX_PLUS_DEBUG(assert(!sign)); } - MaxPlus(double lnx,init_lnx) : v_(lnx) {} - static Self exp(T lnx) { return MaxPlus(lnx,false); } - - // maybe the below are faster than == 1 and == 0. i don't know. - bool is_1() const { return v_==0; } - bool is_0() const { return v_==LOGVAL_LOG0; } - - static Self One() { return Self(init_1()); } - static Self Zero() { return Self(init_0()); } - static Self e() { return Self(1,false); } - void logeq(const T& v) { v_ = v; } - bool signbit() const { return false; } - - Self& logpluseq(const Self& a) { - if (a.is_0()) return *this; - if (a.v_ < v_) { - v_ = v_ + log1p(std::exp(a.v_ - v_)); - } else { - v_ = a.v_ + log1p(std::exp(v_ - a.v_)); - } - return *this; - } - - Self& besteq(const Self& a) { - if (a.v_ < v_) - v_=a.v_; - return *this; - } - - Self& operator+=(const Self& a) { - if (a.v_ < v_) - v_=a.v_; - return *this; - } - - Self& operator*=(const Self& a) { - v_ += a.v_; - return *this; - } - - Self& operator/=(const Self& a) { - v_ -= a.v_; - return *this; - } - - // Self(fabs(log(x)),x.s_) - friend Self abslog(Self x) { - if (x.v_<0) x.v_=-x.v_; - return x; - } - - Self& poweq(const T& power) { - v_ *= power; - return *this; - } - - Self inverse() const { - return Self(-v_,false); - } - - Self pow(const T& power) const { - Self res = *this; - res.poweq(power); - return res; - } - - Self root(const T& root) const { - return pow(1/root); - } - -// copy elision - as opposed to explicit copy of Self const& o1, we should be able to construct Logval r=a+(b+c) as a single result in place in r. 
todo: return std::move(o1) - C++0x - friend inline Self operator+(Self a,Self const& b) { - a+=b; - return a; - } - friend inline Self operator*(Self a,Self const& b) { - a*=b; - return a; - } - friend inline Self operator/(Self a,Self const& b) { - a/=b; - return a; - } - friend inline T log(Self const& a) { - return a.v_; - } - friend inline T pow(Self const& a,T const& e) { - return a.pow(e); - } - - // intentionally not defining an operator < or operator > - because you may want to default (for library convenience) a v_; - } - friend inline bool operator==(Self const& lhs, Self const& rhs) { - return lhs.v_ == rhs.v_; - } - friend inline bool operator!=(Self const& lhs, Self const& rhs) { - return lhs.v_ != rhs.v_; - } - std::size_t hash() const { - using namespace boost; - return hash_value(v_); - } - friend inline std::size_t hash_value(Self const& x) { - return x.hash(); - } - -/* - operator T() const { - return std::exp(v_); - } -*/ - T as_float() const { - return std::exp(v_); - } - - T v_; -}; - -template -struct semiring_traits > : default_semiring_traits > { - static const bool has_logplus=true; - static const bool has_besteq=true; -#if MAX_PLUS_ORDER - static const bool have_order=true; -#endif -}; - -#if MAX_PLUS_ORDER -template -bool operator<(const MaxPlus& lhs, const MaxPlus& rhs) { - return (lhs.v_ < rhs.v_); -} - -template -bool operator<=(const MaxPlus& lhs, const MaxPlus& rhs) { - return (lhs.v_ <= rhs.v_); -} - -template -bool operator>(const MaxPlus& lhs, const MaxPlus& rhs) { - return (lhs.v_ > rhs.v_); -} - -template -bool operator>=(const MaxPlus& lhs, const MaxPlus& rhs) { - return (lhs.v_ >= rhs.v_); -} -#endif - -#endif diff --git a/utils/maybe_update_bound.h b/utils/maybe_update_bound.h deleted file mode 100644 index d57215d0..00000000 --- a/utils/maybe_update_bound.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef MAYBE_UPDATE_BOUND_H -#define MAYBE_UPDATE_BOUND_H - -template -inline void maybe_increase_max(To &to,const From &from) { - if (to -inline void maybe_decrease_min(To &to,const From &from) { - if (from - -template struct nan_static_assert; -template <> struct nan_static_assert { }; - -// is_iec559 i.e. only IEEE 754 float has x != x <=> x is nan -template -inline bool is_nan(T x) { -// static_cast(sizeof(nan_static_assert::has_quiet_NaN>)); - return std::numeric_limits::has_quiet_NaN && (x != x); -} - -template -inline bool is_inf(T x) { -// static_cast(sizeof(nan_static_assert::has_infinity>)); - return x == std::numeric_limits::infinity() || x == -std::numeric_limits::infinity(); -} - -template -inline bool is_pos_inf(T x) { -// static_cast(sizeof(nan_static_assert::has_infinity>)); - return x == std::numeric_limits::infinity(); -} - -template -inline bool is_neg_inf(T x) { -// static_cast(sizeof(nan_static_assert::has_infinity>)); - return x == -std::numeric_limits::infinity(); -} - -//c99 isfinite macro shoudl be much faster -template -inline bool is_finite(T x) { - return !is_nan(x) && !is_inf(x); -} - - -#endif diff --git a/utils/string_to.h b/utils/string_to.h deleted file mode 100644 index c78a5394..00000000 --- a/utils/string_to.h +++ /dev/null @@ -1,314 +0,0 @@ -#ifndef STRING_TO_H -#define STRING_TO_H - -/* - may not be any faster than boost::lexical_cast in later incarnations (see http://accu.org/index.php/journals/1375) - but is slightly simpler. no wide char or locale. 
- - X string_to(string); - string to_string(X); - X& string_into(string,X &); // note: returns the same ref you passed in, for convenience of use - - default implementation via stringstreams (quite slow, I'm sure) - - fast implementation for string, int<->string, unsigned<->string, float<->string, double<->string - -*/ - -#ifndef USE_FTOA -#define USE_FTOA 1 -#endif -#ifndef HAVE_STRTOUL -# define HAVE_STRTOUL 1 -#endif - -#include -#include -#include -#include - -#include "have_64_bits.h" -#include "utoa.h" -#if USE_FTOA -# include "ftoa.h" -#endif - -namespace { -// for faster numeric to/from string. TODO: separate into optional header -#include -#include -#include // access to evil (fast) C isspace etc. -#include //strtoul -} - -inline void throw_string_to(std::string const& msg,char const* prefix="string_to: ") { - throw std::runtime_error(prefix+msg); -} - -template -bool try_stream_into(I & i,To &to,bool complete=true) -{ - i >> to; - if (i.fail()) return false; - if (complete) { - char c; - return !(i >> c); - } - return true; -} - -template -bool try_string_into(Str const& str,To &to,bool complete=true) -{ - std::istringstream i(str); - return try_stream_into(i,to,complete); -} - -template inline -Data & string_into(const Str &str,Data &data) -{ - if (!try_string_into(str,data)) - throw std::runtime_error(std::string("Couldn't convert (string_into): ")+str); - return data; -} - - -template inline -Data string_to(const Str &str) -{ - Data ret; - string_into(str,ret); - return ret; -} - -template inline -std::string to_string(D const &d) -{ - std::ostringstream o; - o << d; - return o.str(); -} - -inline std::string to_string(unsigned x) { - return utos(x); -} - -inline std::string to_string(int x) { - return itos(x); -} - -inline long strtol_complete(char const* s,int base=10) { - char *e; - if (*s) { - long r=strtol(s,&e,base); - char c=*e; - if (!c || isspace(c)) //simplifying assumption: we're happy if there's other stuff in the string, so long as the number ends in a space or eos. TODO: loop consuming spaces until end? - return r; - } - throw_string_to(s,"Couldn't convert to integer: "); -} - -// returns -INT_MAX or INT_MAX if number is too large/small -inline int strtoi_complete_bounded(char const* s,int base=10) { - long l=strtol_complete(s,base); - if (l::min()) - return std::numeric_limits::min(); - if (l>std::numeric_limits::max()) - return std::numeric_limits::max(); - return l; -} -#define RANGE_STR(x) #x -#ifdef INT_MIN -# define INTRANGE_STR "[" RANGE_STR(INT_MIN) "," RANGE_STR(INT_MAX) "]" -#else -# define INTRANGE_STR "[-2137483648,2147483647]" -#endif - - // throw if out of int range -inline int strtoi_complete_exact(char const* s,int base=10) { - long l=strtol_complete(s,base); - if (l::min() || l>std::numeric_limits::max()) - throw_string_to(s,"Out of range for int " INTRANGE_STR ": "); - return l; -} - -#if HAVE_LONGER_LONG -inline int& string_into(std::string const& s,int &x) { - x=strtoi_complete_exact(s.c_str()); - return x; -} -inline int& string_into(char const* s,int &x) { - x=strtoi_complete_exact(s); - return x; -} -#endif - -inline long& string_into(std::string const& s,long &x) { - x=strtol_complete(s.c_str()); - return x; -} -inline long& string_into(char const* s,long &x) { - x=strtol_complete(s); - return x; -} - - -//FIXME: preprocessor separation for tokens int<->unsigned int, long<->unsigned long, strtol<->strtoul ? 
massive code duplication -inline unsigned long strtoul_complete(char const* s,int base=10) { - char *e; - if (*s) { -#if HAVE_STRTOUL - unsigned long r=strtoul(s,&e,base); -#else -// unsigned long r=strtol(s,&e,base); //FIXME: not usually safe - unsigned long r; - sscanf(s,"%ul",&r); -#endif - char c=*e; - if (!c || isspace(c)) //simplifying assumption: we're happy if there's other stuff in the string, so long as the number ends in a space or eos. TODO: loop consuming spaces until end? - return r; - } - throw_string_to(s,"Couldn't convert to integer: "); -} - -inline unsigned strtou_complete_bounded(char const* s,int base=10) { - unsigned long l=strtoul_complete(s,base); - if (l::min()) - return std::numeric_limits::min(); - if (l>std::numeric_limits::max()) - return std::numeric_limits::max(); - return l; -} - -#ifdef UINT_MIN -# define UINTRANGE_STR "[" RANGE_STR(UINT_MIN) "," RANGE_STR(UINT_MAX) "]" -#else -# define UINTRANGE_STR "[0,4,294,967,295]" -#endif - - // throw if out of int range -inline unsigned strtou_complete_exact(char const* s,int base=10) { - unsigned long l=strtoul_complete(s,base); - if (l::min() || l>std::numeric_limits::max()) - throw_string_to(s,"Out of range for uint " UINTRANGE_STR ": "); - return l; -} - -#if HAVE_LONGER_LONG -inline unsigned& string_into(std::string const& s,unsigned &x) { - x=strtou_complete_exact(s.c_str()); - return x; -} -inline unsigned& string_into(char const* s,unsigned &x) { - x=strtou_complete_exact(s); - return x; -} -#endif - -inline unsigned long& string_into(std::string const& s,unsigned long &x) { - x=strtoul_complete(s.c_str()); - return x; -} -inline unsigned long& string_into(char const* s,unsigned long &x) { - x=strtoul_complete(s); - return x; -} - -//FIXME: end code duplication - - -/* 9 decimal places needed to avoid rounding error in float->string->float. 17 for double->string->double - in terms of usable decimal places, there are 6 for float and 15 for double - */ -inline std::string to_string_roundtrip(float x) { - char buf[17]; - return std::string(buf,buf+sprintf(buf,"%.9g",x)); -} -inline std::string to_string(float x) { -#if USE_FTOA - return ftos(x); -#else - char buf[15]; - return std::string(buf,buf+sprintf(buf,"%.7g",x)); -#endif -} -inline std::string to_string_roundtrip(double x) { - char buf[32]; - return std::string(buf,buf+sprintf(buf,"%.17g",x)); -} -inline std::string to_string(double x) { -#if USE_FTOA - return ftos(x); -#else - char buf[30]; - return std::string(buf,buf+sprintf(buf,"%.15g",x)); -#endif -} - -inline double& string_into(char const* s,double &x) { - x=std::atof(s); - return x; -} -inline float& string_into(char const* s,float &x) { - x=std::atof(s); - return x; -} - -inline double& string_into(std::string const& s,double &x) { - x=std::atof(s.c_str()); - return x; -} -inline float& string_into(std::string const& s,float &x) { - x=std::atof(s.c_str()); - return x; -} - - -template -bool try_string_into(Str const& str,Str &to,bool complete=true) -{ - str=to; - return true; -} - -inline std::string const& to_string(std::string const& d) -{ - return d; -} - -template -Str const& string_to(Str const &s) -{ - return s; -} - -template -Str & string_into(Str const &s,Str &d) -{ - return d=s; -} - -/* - -template inline -void substring_into(const Str &str,size_type pos,size_type n,Data &data) -{ -// std::istringstream i(str,pos,n); // doesn't exist! 
- std::istringstream i(str.substr(pos,n)); - if (!(i>>*data)) - throw std::runtime_error("Couldn't convert (string_into): "+str); -} - -template inline -Data string_to(const Str &str,size_type pos,size_type n) -{ - Data ret; - substring_into(str,pos,n,ret); - return ret; -} - -*/ - - - -#endif -- cgit v1.2.3 From a872f46ce1212703b8bed562c894ea1a932c0746 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Wed, 29 Feb 2012 07:00:49 +0000 Subject: mpi fixes --- training/lbl_model.cc | 54 +++++++++++++++++++++++++++++++++++++++++---------- utils/corpus_tools.cc | 16 +++++++++------ 2 files changed, 54 insertions(+), 16 deletions(-) (limited to 'training') diff --git a/training/lbl_model.cc b/training/lbl_model.cc index def5075a..a46ce33c 100644 --- a/training/lbl_model.cc +++ b/training/lbl_model.cc @@ -15,6 +15,7 @@ #ifdef HAVE_MPI #include #include +#include namespace mpi = boost::mpi; #endif #include @@ -34,12 +35,26 @@ namespace mpi = boost::mpi; namespace po = boost::program_options; using namespace std; -#define kDIMENSIONS 100 +#define kDIMENSIONS 10 typedef Eigen::Matrix RVector; typedef Eigen::Matrix RTVector; typedef Eigen::Matrix TMatrix; vector r_src, r_trg; +#if HAVE_MPI +namespace boost { +namespace serialization { + +template +void serialize(Archive & ar, RVector & v, const unsigned int version) { + for (unsigned i = 0; i < kDIMENSIONS; ++i) + ar & v[i]; +} + +} // namespace serialization +} // namespace boost +#endif + bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { po::options_description opts("Configuration options"); opts.add_options() @@ -224,7 +239,7 @@ int main(int argc, char** argv) { srand(seed); } - TMatrix t; + TMatrix t = TMatrix::Zero(); if (rank == 0) { t = TMatrix::Random() / 50.0; for (unsigned i = 1; i < r_trg.size(); ++i) { @@ -241,16 +256,18 @@ int main(int argc, char** argv) { TMatrix g = TMatrix::Zero(); vector exp_src; vector z_src; - vector flat_g, flat_t; + vector flat_g, flat_t, rcv_grad; Flatten(t, &flat_t); bool converged = false; - // TODO broadcast embeddings - for (int iter = 0; !converged && iter < ITERATIONS; ++iter) { -#ifdef HAVE_MPI - mpi::broadcast(world, &flat_t[0], flat_t.size(), 0); +#if HAVE_MPI + mpi::broadcast(world, &flat_t[0], flat_t.size(), 0); + mpi::broadcast(world, r_trg, 0); + mpi::broadcast(world, r_src, 0); #endif + cerr << "rank=" << rank << ": " << r_trg[0][4] << endl; + for (int iter = 0; !converged && iter < ITERATIONS; ++iter) { + if (rank == 0) cerr << "ITERATION " << (iter + 1) << endl; Unflatten(flat_t, &t); - cerr << "ITERATION " << (iter + 1) << endl; double likelihood = 0; double denom = 0.0; lc = 0; @@ -350,7 +367,22 @@ int main(int argc, char** argv) { if (!SGD) { Flatten(g, &flat_g); obj = -likelihood; - // TODO - reduce gradient +#if HAVE_MPI + rcv_grad.resize(flat_g.size(), 0.0); + mpi::reduce(world, &flat_g[0], flat_g.size(), &rcv_grad[0], plus(), 0); + swap(flat_g, rcv_grad); + rcv_grad.clear(); + + double to = 0; + mpi::reduce(world, obj, to, plus(), 0); + obj = to; + double tlh = 0; + mpi::reduce(world, likelihood, tlh, plus(), 0); + likelihood = tlh; + double td = 0; + mpi::reduce(world, denom, td, plus(), 0); + denom = td; +#endif } if (rank == 0) { @@ -376,10 +408,12 @@ int main(int argc, char** argv) { } } #ifdef HAVE_MPI + mpi::broadcast(world, &flat_t[0], flat_t.size(), 0); mpi::broadcast(world, converged, 0); #endif } - cerr << "TRANSLATION MATRIX:" << endl << t << endl; + if (rank == 0) + cerr << "TRANSLATION MATRIX:" << endl << t << endl; return 0; } diff --git 
a/utils/corpus_tools.cc b/utils/corpus_tools.cc index a0542b6e..d17785af 100644 --- a/utils/corpus_tools.cc +++ b/utils/corpus_tools.cc @@ -33,10 +33,12 @@ void CorpusTools::ReadFromFile(const string& filename, while(getline(in, line)) { const bool skip = (lc % size != rank); ++lc; - if (skip) continue; TD::ConvertSentence(line, &tmp); - src->push_back(vector()); - vector* d = &src->back(); + vector* d = NULL; + if (!skip) { + src->push_back(vector()); + d = &src->back(); + } set* v = src_vocab; int s = 0; for (unsigned i = 0; i < tmp.size(); ++i) { @@ -44,11 +46,13 @@ void CorpusTools::ReadFromFile(const string& filename, ++s; if (s > 1) { cerr << "Unexpected format in line " << lc << ": " << line << endl; abort(); } assert(trg); - trg->push_back(vector()); - d = &trg->back(); + if (!skip) { + trg->push_back(vector()); + d = &trg->back(); + } v = trg_vocab; } else { - d->push_back(tmp[i]); + if (d) d->push_back(tmp[i]); if (v) v->insert(tmp[i]); } } -- cgit v1.2.3 From 63ea78b71bf913be248b064219734cac5ce41be2 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Wed, 7 Mar 2012 18:54:11 +0000 Subject: better logging with MPI --- training/mpi_flex_optimize.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'training') diff --git a/training/mpi_flex_optimize.cc b/training/mpi_flex_optimize.cc index 00746532..d98ea4dc 100644 --- a/training/mpi_flex_optimize.cc +++ b/training/mpi_flex_optimize.cc @@ -205,7 +205,7 @@ int main(int argc, char** argv) { const int size = 1; const int rank = 0; #endif - if (size > 0) SetSilent(true); // turn off verbose decoder output + if (size > 1) SetSilent(true); // turn off verbose decoder output register_feature_functions(); MT19937* rng = NULL; @@ -343,7 +343,7 @@ int main(int argc, char** argv) { double obj = 0; #ifdef HAVE_MPI - // TODO obj + reduce(world, local_obj, obj, std::plus(), 0); reduce(world, local_grad, g, std::plus >(), 0); #else obj = local_obj; -- cgit v1.2.3 From fdeb2267eb843ef80b5f2b95234a72c9c3333bbe Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Wed, 7 Mar 2012 20:13:58 +0000 Subject: more mpi fixes --- training/mpi_flex_optimize.cc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'training') diff --git a/training/mpi_flex_optimize.cc b/training/mpi_flex_optimize.cc index d98ea4dc..a9197208 100644 --- a/training/mpi_flex_optimize.cc +++ b/training/mpi_flex_optimize.cc @@ -272,6 +272,7 @@ int main(int argc, char** argv) { int iter = -1; bool converged = false; + vector gg; while (!converged) { #ifdef HAVE_MPI mpi::timer timer; @@ -354,13 +355,14 @@ int main(int argc, char** argv) { // g /= (size_per_proc * size); if (!o) o.reset(new LBFGSOptimizer(FD::NumFeats(), lbfgs_memory_buffers)); - vector gg(FD::NumFeats()); + gg.clear(); + gg.resize(FD::NumFeats()); if (gg.size() != cur_weights.size()) { cur_weights.resize(gg.size()); } for (SparseVector::const_iterator it = g.begin(); it != g.end(); ++it) if (it->first) { gg[it->first] = it->second; } g.clear(); double r = ApplyRegularizationTerms(regularization_strength, - time_series_strength * (iter == 0 ? 0.0 : 1.0), + time_series_strength, // * (iter == 0 ? 
0.0 : 1.0), cur_weights, prev_weights, &gg); @@ -375,10 +377,9 @@ int main(int argc, char** argv) { o->Optimize(obj, gg, &cur_weights); } #ifdef HAVE_MPI - // broadcast(world, x, 0); + broadcast(world, cur_weights, 0); broadcast(world, converged, 0); world.barrier(); - if (rank == 0) { cerr << " ELAPSED TIME THIS ITERATION=" << timer.elapsed() << endl; } #endif } prev_weights = cur_weights; -- cgit v1.2.3
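
Taken together, the MPI commits above settle on one pattern: every rank reads the whole corpus but keeps only its share of lines (lc % size == rank, as in corpus_tools.cc), accumulates a local gradient and objective, reduces both onto rank 0 (as in lbl_model.cc and mpi_flex_optimize.cc), lets rank 0 take the optimizer step, then broadcasts the updated weights and the convergence flag back before the next iteration. The sketch below is a minimal, self-contained rendering of that communication pattern with Boost.MPI; ComputeLocalGradient and OptimizerStep are hypothetical stand-ins for the real models, and the fixed five iterations are arbitrary -- it is an illustration, not code from the repository.

#include <boost/mpi.hpp>
#include <functional>
#include <iostream>
#include <string>
#include <vector>
namespace mpi = boost::mpi;
using namespace std;

// Hypothetical stand-in for the per-rank work done by the real trainers.
static double ComputeLocalGradient(const vector<string>& shard,
                                   const vector<double>& w,
                                   vector<double>* g) {
  // Toy objective: each kept sentence contributes 1.0 to the objective and to g[0];
  // a real model would of course use the current weights w.
  if (!g->empty()) (*g)[0] += shard.size();
  return static_cast<double>(shard.size()) + 0.0 * w.size();
}

// Hypothetical stand-in for the optimizer update taken on rank 0.
static void OptimizerStep(const vector<double>& g, vector<double>* w) {
  for (size_t i = 0; i < w->size(); ++i) (*w)[i] -= 0.1 * g[i];  // toy gradient step
}

int main(int argc, char** argv) {
  mpi::environment env(argc, argv);
  mpi::communicator world;
  const int rank = world.rank();
  const int size = world.size();

  // Round-robin sharding as in CorpusTools::ReadFromFile: every rank scans all lines
  // but keeps only those with lc % size == rank.
  vector<string> shard;
  string line;
  int lc = 0;
  while (getline(cin, line)) {
    if (lc % size == rank) shard.push_back(line);
    ++lc;
  }

  vector<double> weights(1000, 0.0);
  bool converged = false;
  for (int iter = 0; !converged && iter < 5; ++iter) {
    // Rank 0's parameters are authoritative; start every iteration from them.
    mpi::broadcast(world, &weights[0], weights.size(), 0);

    vector<double> local_grad(weights.size(), 0.0);
    const double local_obj = ComputeLocalGradient(shard, weights, &local_grad);

    // Sum per-rank gradients and objectives onto rank 0, mirroring the reduce calls above.
    vector<double> grad(weights.size(), 0.0);
    double obj = 0;
    mpi::reduce(world, &local_grad[0], local_grad.size(), &grad[0], plus<double>(), 0);
    mpi::reduce(world, local_obj, obj, plus<double>(), 0);

    if (rank == 0) {
      OptimizerStep(grad, &weights);
      cerr << "iteration " << (iter + 1) << " objective " << obj << endl;
      converged = (iter + 1 == 5);
    }
    // Everyone must agree on whether to stop.
    mpi::broadcast(world, converged, 0);
  }
  return 0;
}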