From 0b598b997a7c1d2d9dc255cc2ff1bf9bb2c425a1 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Thu, 15 Mar 2012 22:47:04 -0400 Subject: bayes bayes bayes --- gi/pf/Makefile.am | 7 +- gi/pf/align-lexonly-pyp.cc | 10 ++- gi/pf/hpyp_tm.cc | 133 +++++++++++++++++++++++++++++++++++++ gi/pf/hpyp_tm.h | 38 +++++++++++ gi/pf/poisson_uniform_word_model.h | 50 ++++++++++++++ gi/pf/pyp_tm.cc | 11 ++- gi/pf/pyp_tm.h | 7 +- gi/pf/pyp_word_model.cc | 20 ------ gi/pf/pyp_word_model.h | 46 +++++++------ gi/pf/quasi_model2.h | 13 +++- gi/pf/tied_resampler.h | 6 +- utils/ccrp.h | 4 ++ utils/mfcr.h | 4 ++ 13 files changed, 288 insertions(+), 61 deletions(-) create mode 100644 gi/pf/hpyp_tm.cc create mode 100644 gi/pf/hpyp_tm.h create mode 100644 gi/pf/poisson_uniform_word_model.h delete mode 100644 gi/pf/pyp_word_model.cc diff --git a/gi/pf/Makefile.am b/gi/pf/Makefile.am index f9c979d0..d365016b 100644 --- a/gi/pf/Makefile.am +++ b/gi/pf/Makefile.am @@ -1,8 +1,11 @@ -bin_PROGRAMS = cbgi brat dpnaive pfbrat pfdist itg pfnaive condnaive align-lexonly-pyp learn_cfg pyp_lm nuisance_test align-tl +bin_PROGRAMS = cbgi brat dpnaive pfbrat pfdist itg pfnaive condnaive align-lexonly-pyp learn_cfg pyp_lm nuisance_test align-tl pf_test noinst_LIBRARIES = libpf.a -libpf_a_SOURCES = base_distributions.cc reachability.cc cfg_wfst_composer.cc corpus.cc unigrams.cc ngram_base.cc transliterations.cc backward.cc pyp_word_model.cc pyp_tm.cc +libpf_a_SOURCES = base_distributions.cc reachability.cc cfg_wfst_composer.cc corpus.cc unigrams.cc ngram_base.cc transliterations.cc backward.cc hpyp_tm.cc pyp_tm.cc + +pf_test_SOURCES = pf_test.cc +pf_test_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz nuisance_test_SOURCES = nuisance_test.cc nuisance_test_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz diff --git a/gi/pf/align-lexonly-pyp.cc b/gi/pf/align-lexonly-pyp.cc index 942dcf51..e7509f57 100644 --- a/gi/pf/align-lexonly-pyp.cc +++ b/gi/pf/align-lexonly-pyp.cc @@ -11,6 +11,7 @@ #include "sampler.h" #include "corpus.h" #include "pyp_tm.h" +#include "hpyp_tm.h" #include "quasi_model2.h" using namespace std; @@ -61,15 +62,17 @@ struct AlignedSentencePair { Array2D posterior; }; +template struct Aligner { Aligner(const vector >& lets, + int vocab_size, int num_letters, const po::variables_map& conf, vector* c) : corpus(*c), paj_model(conf["align_alpha"].as(), conf["p_null"].as()), infer_paj(conf.count("infer_alignment_hyperparameters") > 0), - model(lets, num_letters), + model(lets, vocab_size, num_letters), kNULL(TD::Convert("NULL")) { assert(lets[kNULL].size() == 0); } @@ -77,7 +80,7 @@ struct Aligner { vector& corpus; QuasiModel2 paj_model; const bool infer_paj; - PYPLexicalTranslation model; + LexicalTranslationModel model; const WordID kNULL; void ResampleHyperparameters() { @@ -217,7 +220,8 @@ int main(int argc, char** argv) { ExtractLetters(vocabf, &letters, NULL); letters[TD::Convert("NULL")].clear(); - Aligner aligner(letters, letset.size(), conf, &corpus); + //Aligner aligner(letters, vocabe.size(), letset.size(), conf, &corpus); + Aligner aligner(letters, vocabe.size(), letset.size(), conf, &corpus); aligner.InitializeRandom(); const unsigned samples = conf["samples"].as(); diff --git a/gi/pf/hpyp_tm.cc b/gi/pf/hpyp_tm.cc new file mode 100644 index 
00000000..784f9958 --- /dev/null +++ b/gi/pf/hpyp_tm.cc @@ -0,0 +1,133 @@ +#include "hpyp_tm.h" + +#include +#include +#include + +#include "tdict.h" +#include "ccrp.h" +#include "pyp_word_model.h" +#include "tied_resampler.h" + +using namespace std; +using namespace std::tr1; + +struct FreqBinner { + FreqBinner(const std::string& fname) { fd_.Load(fname); } + unsigned NumberOfBins() const { return fd_.Max() + 1; } + unsigned Bin(const WordID& w) const { return fd_.LookUp(w); } + FreqDict fd_; +}; + +template +struct ConditionalPYPWordModel { + ConditionalPYPWordModel(Base* b, const Binner* bnr = NULL) : + base(*b), + binner(bnr), + btr(binner ? binner->NumberOfBins() + 1u : 2u) {} + + void Summary() const { + cerr << "Number of conditioning contexts: " << r.size() << endl; + for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) { + cerr << TD::Convert(it->first) << " \tPYP(d=" << it->second.discount() << ",s=" << it->second.strength() << ") --------------------------" << endl; + for (CCRP >::const_iterator i2 = it->second.begin(); i2 != it->second.end(); ++i2) + cerr << " " << i2->second.total_dish_count_ << '\t' << TD::GetString(i2->first) << endl; + } + } + + void ResampleHyperparameters(MT19937* rng) { + btr.ResampleHyperparameters(rng); + } + + prob_t Prob(const WordID src, const vector& trglets) const { + RuleModelHash::const_iterator it = r.find(src); + if (it == r.end()) { + return base(trglets); + } else { + return it->second.prob(trglets, base(trglets)); + } + } + + void Increment(const WordID src, const vector& trglets, MT19937* rng) { + RuleModelHash::iterator it = r.find(src); + if (it == r.end()) { + it = r.insert(make_pair(src, CCRP >(0.5,1.0))).first; + static const WordID kNULL = TD::Convert("NULL"); + unsigned bin = (src == kNULL ? 
0 : 1); + if (binner && bin) { bin = binner->Bin(src) + 1; } + btr.Add(bin, &it->second); + } + if (it->second.increment(trglets, base(trglets), rng)) + base.Increment(trglets, rng); + } + + void Decrement(const WordID src, const vector& trglets, MT19937* rng) { + RuleModelHash::iterator it = r.find(src); + assert(it != r.end()); + if (it->second.decrement(trglets, rng)) { + base.Decrement(trglets, rng); + } + } + + prob_t Likelihood() const { + prob_t p = prob_t::One(); + for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) { + prob_t q; q.logeq(it->second.log_crp_prob()); + p *= q; + } + return p; + } + + unsigned UniqueConditioningContexts() const { + return r.size(); + } + + // TODO tie PYP hyperparameters based on source word frequency bins + Base& base; + const Binner* binner; + BinTiedResampler > > btr; + typedef unordered_map > > RuleModelHash; + RuleModelHash r; +}; + +HPYPLexicalTranslation::HPYPLexicalTranslation(const vector >& lets, + const unsigned vocab_size, + const unsigned num_letters) : + letters(lets), + base(vocab_size, num_letters, 5), + up0(new PYPWordModel(&base)), + tmodel(new ConditionalPYPWordModel >(up0, new FreqBinner("10k.freq"))), + kX(-TD::Convert("X")) {} + +void HPYPLexicalTranslation::Summary() const { + tmodel->Summary(); + up0->Summary(); +} + +prob_t HPYPLexicalTranslation::Likelihood() const { + prob_t p = up0->Likelihood(); + p *= tmodel->Likelihood(); + return p; +} + +void HPYPLexicalTranslation::ResampleHyperparameters(MT19937* rng) { + tmodel->ResampleHyperparameters(rng); + up0->ResampleHyperparameters(rng); +} + +unsigned HPYPLexicalTranslation::UniqueConditioningContexts() const { + return tmodel->UniqueConditioningContexts(); +} + +prob_t HPYPLexicalTranslation::Prob(WordID src, WordID trg) const { + return tmodel->Prob(src, letters[trg]); +} + +void HPYPLexicalTranslation::Increment(WordID src, WordID trg, MT19937* rng) { + tmodel->Increment(src, letters[trg], rng); +} + +void HPYPLexicalTranslation::Decrement(WordID src, WordID trg, MT19937* rng) { + tmodel->Decrement(src, letters[trg], rng); +} + diff --git a/gi/pf/hpyp_tm.h b/gi/pf/hpyp_tm.h new file mode 100644 index 00000000..af3215ba --- /dev/null +++ b/gi/pf/hpyp_tm.h @@ -0,0 +1,38 @@ +#ifndef HPYP_LEX_TRANS +#define HPYP_LEX_TRANS + +#include +#include "wordid.h" +#include "prob.h" +#include "sampler.h" +#include "freqdict.h" +#include "poisson_uniform_word_model.h" + +struct FreqBinner; +template struct PYPWordModel; +template struct ConditionalPYPWordModel; + +struct HPYPLexicalTranslation { + explicit HPYPLexicalTranslation(const std::vector >& lets, + const unsigned vocab_size, + const unsigned num_letters); + + prob_t Likelihood() const; + + void ResampleHyperparameters(MT19937* rng); + prob_t Prob(WordID src, WordID trg) const; // return p(trg | src) + void Summary() const; + void Increment(WordID src, WordID trg, MT19937* rng); + void Decrement(WordID src, WordID trg, MT19937* rng); + unsigned UniqueConditioningContexts() const; + + private: + const std::vector >& letters; // spelling dictionary + PoissonUniformWordModel base; // "generator" of English types + PYPWordModel* up0; // model English lexicon + ConditionalPYPWordModel, FreqBinner>* tmodel; // translation distributions + // (model English word | French word) + const WordID kX; +}; + +#endif diff --git a/gi/pf/poisson_uniform_word_model.h b/gi/pf/poisson_uniform_word_model.h new file mode 100644 index 00000000..76204a0e --- /dev/null +++ b/gi/pf/poisson_uniform_word_model.h @@ -0,0 +1,50 @@ +#ifndef 
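A note on the FreqBinner/BinTiedResampler wiring in hpyp_tm.cc above: each source word's CRP is assigned to a bin keyed on source-word frequency (bin 0 is reserved for NULL), and every CRP in a bin shares one (discount, strength) pair when hyperparameters are resampled, so rare and frequent source words get separately tuned priors. A minimal sketch of that grouping, with a hypothetical fixed update standing in for the slice sampler that tied_resampler.h actually runs:

#include <cassert>
#include <set>
#include <vector>

struct ToyCRP { double discount, strength; };

// Mirrors BinTiedResampler::Add + ResampleHyperparameters: CRPs in the
// same bin always receive identical hyperparameter updates.
struct ToyBinTiedResampler {
  explicit ToyBinTiedResampler(unsigned num_bins) : bins(num_bins) {}
  void Add(unsigned bin, ToyCRP* crp) {
    assert(bin < bins.size());
    bins[bin].insert(crp);
  }
  void ResampleHyperparameters() {
    for (unsigned b = 0; b < bins.size(); ++b) {
      const double d = 0.5, s = 1.0;  // hypothetical values; really slice-sampled per bin
      for (std::set<ToyCRP*>::iterator it = bins[b].begin(); it != bins[b].end(); ++it) {
        (*it)->discount = d;
        (*it)->strength = s;
      }
    }
  }
  std::vector<std::set<ToyCRP*> > bins;
};

int main() {
  ToyBinTiedResampler btr(3);  // NULL bin plus two frequency bins
  ToyCRP a, b;
  btr.Add(1, &a);              // two source words from the same bin
  btr.Add(1, &b);
  btr.ResampleHyperparameters();
  assert(a.discount == b.discount && a.strength == b.strength);
  return 0;
}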
_POISSON_UNIFORM_WORD_MODEL_H_ +#define _POISSON_UNIFORM_WORD_MODEL_H_ + +#include +#include +#include "prob.h" +#include "m.h" + +// len ~ Poisson(lambda) +// for (1..len) +// e_i ~ Uniform({Vocabulary}) +struct PoissonUniformWordModel { + explicit PoissonUniformWordModel(const unsigned vocab_size, + const unsigned alphabet_size, + const double mean_len = 5) : + lh(prob_t::One()), + v0(-std::log(vocab_size)), + u0(-std::log(alphabet_size)), + mean_length(mean_len) {} + + void ResampleHyperparameters(MT19937*) {} + + inline prob_t operator()(const std::vector& s) const { + prob_t p; + p.logeq(Md::log_poisson(s.size(), mean_length) + s.size() * u0); + //p.logeq(v0); + return p; + } + + inline void Increment(const std::vector& w, MT19937*) { + lh *= (*this)(w); + } + + inline void Decrement(const std::vector& w, MT19937 *) { + lh /= (*this)(w); + } + + inline prob_t Likelihood() const { return lh; } + + void Summary() const {} + + private: + + prob_t lh; // keeps track of the draws from the base distribution + const double v0; // uniform log prob of generating a word + const double u0; // uniform log prob of generating a letter + const double mean_length; // mean length of a word in the base distribution +}; + +#endif diff --git a/gi/pf/pyp_tm.cc b/gi/pf/pyp_tm.cc index e21f0267..6bc8a5bf 100644 --- a/gi/pf/pyp_tm.cc +++ b/gi/pf/pyp_tm.cc @@ -91,26 +91,23 @@ struct ConditionalPYPWordModel { }; PYPLexicalTranslation::PYPLexicalTranslation(const vector >& lets, + const unsigned vocab_size, const unsigned num_letters) : letters(lets), - up0(new PYPWordModel(num_letters)), - tmodel(new ConditionalPYPWordModel(up0, new FreqBinner("10k.freq"))), + base(vocab_size, num_letters, 5), + tmodel(new ConditionalPYPWordModel(&base, new FreqBinner("10k.freq"))), kX(-TD::Convert("X")) {} void PYPLexicalTranslation::Summary() const { tmodel->Summary(); - up0->Summary(); } prob_t PYPLexicalTranslation::Likelihood() const { - prob_t p = up0->Likelihood(); - p *= tmodel->Likelihood(); - return p; + return tmodel->Likelihood() * base.Likelihood(); } void PYPLexicalTranslation::ResampleHyperparameters(MT19937* rng) { tmodel->ResampleHyperparameters(rng); - up0->ResampleHyperparameters(rng); } unsigned PYPLexicalTranslation::UniqueConditioningContexts() const { diff --git a/gi/pf/pyp_tm.h b/gi/pf/pyp_tm.h index 63e7c96d..2b076a25 100644 --- a/gi/pf/pyp_tm.h +++ b/gi/pf/pyp_tm.h @@ -6,13 +6,14 @@ #include "prob.h" #include "sampler.h" #include "freqdict.h" +#include "poisson_uniform_word_model.h" struct FreqBinner; -struct PYPWordModel; template struct ConditionalPYPWordModel; struct PYPLexicalTranslation { explicit PYPLexicalTranslation(const std::vector >& lets, + const unsigned vocab_size, const unsigned num_letters); prob_t Likelihood() const; @@ -26,8 +27,8 @@ struct PYPLexicalTranslation { private: const std::vector >& letters; // spelling dictionary - PYPWordModel* up0; // base distribuction (model English word) - ConditionalPYPWordModel* tmodel; // translation distributions + PoissonUniformWordModel base; // "generator" of English types + ConditionalPYPWordModel* tmodel; // translation distributions // (model English word | French word) const WordID kX; }; diff --git a/gi/pf/pyp_word_model.cc b/gi/pf/pyp_word_model.cc deleted file mode 100644 index 12df4abf..00000000 --- a/gi/pf/pyp_word_model.cc +++ /dev/null @@ -1,20 +0,0 @@ -#include "pyp_word_model.h" - -#include - -using namespace std; - -void PYPWordModel::ResampleHyperparameters(MT19937* rng) { - r.resample_hyperparameters(rng); - cerr << " 
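For readers not steeped in the machinery this commit refactors: PoissonUniformWordModel scores a letter string as a Poisson-distributed length times an independent uniform choice per letter, and the CRP layers above it mix cached draws with that base according to the usual Pitman-Yor predictive rule. A self-contained sketch of both pieces (log_poisson is written out here; the repo's version lives in m.h):

#include <cmath>
#include <cstdio>
#include <vector>

// log Poisson(n; lambda) = n*log(lambda) - lambda - log(n!)
double log_poisson(unsigned n, double lambda) {
  return n * std::log(lambda) - lambda - std::lgamma(n + 1.0);
}

// Base measure: len ~ Poisson(mean_len); each letter ~ Uniform(alphabet).
double log_p0(const std::vector<int>& s, unsigned alphabet_size, double mean_len = 5.0) {
  return log_poisson(s.size(), mean_len) - s.size() * std::log((double)alphabet_size);
}

// Pitman-Yor/CRP predictive probability of a dish observed c_w times at
// t_w tables, in a restaurant with n customers and t tables total:
//   p(w) = (c_w - d*t_w + (s + d*t) * p0(w)) / (s + n)
double pyp_prob(double c_w, double t_w, double n, double t,
                double d, double s, double p0) {
  return (c_w - d * t_w + (s + d * t) * p0) / (s + n);
}

int main() {
  std::vector<int> word(3, 0);  // any 3-letter word over a 26-letter alphabet
  const double p0 = std::exp(log_p0(word, 26));
  // probability of an unseen word in a restaurant with 10 customers, 4 tables
  std::printf("p(new word) = %g\n", pyp_prob(0, 0, 10, 4, 0.5, 1.0, p0));
  return 0;
}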
PYPWordModel(d=" << r.discount() << ",s=" << r.strength() << ")\n"; -} - -void PYPWordModel::Summary() const { - cerr << "PYPWordModel: generations=" << r.num_customers() - << " PYP(d=" << r.discount() << ",s=" << r.strength() << ')' << endl; - for (CCRP >::const_iterator it = r.begin(); it != r.end(); ++it) - cerr << " " << it->second.total_dish_count_ - << " (on " << it->second.table_counts_.size() << " tables) " - << TD::GetString(it->first) << endl; -} - diff --git a/gi/pf/pyp_word_model.h b/gi/pf/pyp_word_model.h index ff366865..224a9034 100644 --- a/gi/pf/pyp_word_model.h +++ b/gi/pf/pyp_word_model.h @@ -11,48 +11,52 @@ #include "os_phrase.h" // PYP(d,s,poisson-uniform) represented as a CRP +template struct PYPWordModel { - explicit PYPWordModel(const unsigned vocab_e_size, const double mean_len = 5) : - base(prob_t::One()), r(1,1,1,1,0.66,50.0), u0(-std::log(vocab_e_size)), mean_length(mean_len) {} - - void ResampleHyperparameters(MT19937* rng); + explicit PYPWordModel(Base* b) : + base(*b), + r(1,1,1,1,0.66,50.0) + {} + + void ResampleHyperparameters(MT19937* rng) { + r.resample_hyperparameters(rng); + std::cerr << " PYPWordModel(d=" << r.discount() << ",s=" << r.strength() << ")\n"; + } inline prob_t operator()(const std::vector& s) const { - return r.prob(s, p0(s)); + return r.prob(s, base(s)); } inline void Increment(const std::vector& s, MT19937* rng) { - if (r.increment(s, p0(s), rng)) - base *= p0(s); + if (r.increment(s, base(s), rng)) + base.Increment(s, rng); } inline void Decrement(const std::vector& s, MT19937 *rng) { if (r.decrement(s, rng)) - base /= p0(s); + base.Decrement(s, rng); } inline prob_t Likelihood() const { prob_t p; p.logeq(r.log_crp_prob()); - p *= base; + p *= base.Likelihood(); return p; } - void Summary() const; - - private: - inline double logp0(const std::vector& s) const { - return Md::log_poisson(s.size(), mean_length) + s.size() * u0; + void Summary() const { + std::cerr << "PYPWordModel: generations=" << r.num_customers() + << " PYP(d=" << r.discount() << ",s=" << r.strength() << ')' << std::endl; + for (typename CCRP >::const_iterator it = r.begin(); it != r.end(); ++it) { + std::cerr << " " << it->second.total_dish_count_ + << " (on " << it->second.table_counts_.size() << " tables) " + << TD::GetString(it->first) << std::endl; + } } - inline prob_t p0(const std::vector& s) const { - prob_t p; p.logeq(logp0(s)); - return p; - } + private: - prob_t base; // keeps track of the draws from the base distribution + Base& base; // keeps track of the draws from the base distribution CCRP > r; - const double u0; // uniform log prob of generating a letter - const double mean_length; // mean length of a word in the base distribution }; #endif diff --git a/gi/pf/quasi_model2.h b/gi/pf/quasi_model2.h index 588c8f84..4075affe 100644 --- a/gi/pf/quasi_model2.h +++ b/gi/pf/quasi_model2.h @@ -9,6 +9,7 @@ #include "array2d.h" #include "slice_sampler.h" #include "m.h" +#include "have_64_bits.h" struct AlignmentObservation { AlignmentObservation() : src_len(), trg_len(), j(), a_j() {} @@ -20,13 +21,23 @@ struct AlignmentObservation { unsigned short a_j; }; +#ifdef HAVE_64_BITS inline size_t hash_value(const AlignmentObservation& o) { return reinterpret_cast(o); } - inline bool operator==(const AlignmentObservation& a, const AlignmentObservation& b) { return hash_value(a) == hash_value(b); } +#else +inline size_t hash_value(const AlignmentObservation& o) { + size_t h = 1; + boost::hash_combine(h, o.src_len); + boost::hash_combine(h, o.trg_len); + 
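The quasi_model2.h hunk above guards a trick: AlignmentObservation is four unsigned shorts, so on a 64-bit build its 8 bytes can be reinterpreted directly as the size_t hash, which is also why operator== can get away with comparing hash values; 32-bit builds need the boost::hash_combine fallback being added here. The layout assumption, made explicit in a sketch:

#include <cstddef>
#include <boost/functional/hash.hpp>

struct Obs { unsigned short src_len, trg_len, j, a_j; };

// 64-bit path: the four 16-bit fields pack exactly into one size_t, so
// the object's bytes already form a collision-free hash.
inline std::size_t hash_value_packed(const Obs& o) {
  static_assert(sizeof(Obs) == sizeof(std::size_t), "needs a 64-bit size_t");
  return reinterpret_cast<const std::size_t&>(o);
}

// Portable path, as in the new #else branch: mix the fields.
inline std::size_t hash_value_mixed(const Obs& o) {
  std::size_t h = 1;
  boost::hash_combine(h, o.src_len);
  boost::hash_combine(h, o.trg_len);
  boost::hash_combine(h, o.j);
  boost::hash_combine(h, o.a_j);
  return h;
}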
boost::hash_combine(h, o.j); + boost::hash_combine(h, o.a_j); + return h; +} +#endif struct QuasiModel2 { explicit QuasiModel2(double alpha, double pnull = 0.1) : diff --git a/gi/pf/tied_resampler.h b/gi/pf/tied_resampler.h index 6f45fbce..a4f4af36 100644 --- a/gi/pf/tied_resampler.h +++ b/gi/pf/tied_resampler.h @@ -78,10 +78,8 @@ struct TiedResampler { std::numeric_limits::infinity(), 0.0, niterations, 100*niterations); std::cerr << "TiedCRPs(d=" << discount << ",s=" << strength << ") = " << LogLikelihood(discount, strength) << std::endl; - for (typename std::set::iterator it = crps.begin(); it != crps.end(); ++it) { - (*it)->set_discount(discount); - (*it)->set_strength(strength); - } + for (typename std::set::iterator it = crps.begin(); it != crps.end(); ++it) + (*it)->set_hyperparameters(discount, strength); } private: std::set crps; diff --git a/utils/ccrp.h b/utils/ccrp.h index 4a8b80e7..390d4994 100644 --- a/utils/ccrp.h +++ b/utils/ccrp.h @@ -55,6 +55,10 @@ class CCRP { double discount() const { return discount_; } double strength() const { return strength_; } + void set_hyperparameters(double d, double s) { + discount_ = d; strength_ = s; + check_hyperparameters(); + } void set_discount(double d) { discount_ = d; check_hyperparameters(); } void set_strength(double a) { strength_ = a; check_hyperparameters(); } diff --git a/utils/mfcr.h b/utils/mfcr.h index 886f01ef..4aacb567 100644 --- a/utils/mfcr.h +++ b/utils/mfcr.h @@ -73,6 +73,10 @@ class MFCR { double discount() const { return discount_; } double strength() const { return strength_; } + void set_hyperparameters(double d, double s) { + discount_ = d; strength_ = s; + check_hyperparameters(); + } void set_discount(double d) { discount_ = d; check_hyperparameters(); } void set_strength(double a) { strength_ = a; check_hyperparameters(); } -- cgit v1.2.3 From 34b4752a1eefc002166e95782c2c52747bb08b3a Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Tue, 20 Mar 2012 15:37:54 -0400 Subject: make c++11 compatible --- decoder/decoder.cc | 31 +++++++++++++++---------------- decoder/earley_composer.cc | 4 +--- decoder/phrasetable_fst.cc | 3 +-- dpmert/ces.cc | 3 +-- dpmert/lo_test.cc | 13 ++++++------- dpmert/mert_geometry.cc | 15 +++++++-------- extools/extract.cc | 8 ++++---- extools/featurize_grammar.cc | 6 +++--- extools/sentence_pair.cc | 5 ++--- gi/pf/align-tl.cc | 2 +- gi/pf/brat.cc | 2 +- gi/pf/cfg_wfst_composer.cc | 3 +-- gi/pf/condnaive.cc | 2 +- gi/pf/dpnaive.cc | 2 +- gi/pf/itg.cc | 2 +- gi/pf/learn_cfg.cc | 2 +- gi/pf/pfbrat.cc | 2 +- gi/pf/pfdist.cc | 2 +- gi/pf/pfnaive.cc | 2 +- gi/pf/pyp_lm.cc | 2 +- mira/kbest_mira.cc | 19 +++++++++---------- mteval/ns.cc | 9 ++++----- phrasinator/gibbs_train_plm.cc | 2 +- phrasinator/gibbs_train_plm.notables.cc | 2 +- training/mpi_batch_optimize.cc | 3 +-- training/mr_optimize_reduce.cc | 3 +-- training/optimize_test.cc | 2 +- utils/atools.cc | 7 +++---- 28 files changed, 72 insertions(+), 86 deletions(-) diff --git a/decoder/decoder.cc b/decoder/decoder.cc index 69fbaf85..d4f8f06d 100644 --- a/decoder/decoder.cc +++ b/decoder/decoder.cc @@ -57,7 +57,6 @@ static const double kMINUS_EPSILON = -1e-6; // don't be too strict using namespace std; using namespace std::tr1; -using boost::shared_ptr; namespace po = boost::program_options; static bool verbose_feature_functions=true; @@ -101,7 +100,7 @@ inline string str(char const* name,po::variables_map const& conf) { // print just the --long_opt names suitable for bash compgen inline void print_options(std::ostream 
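The set_hyperparameters accessor added to CCRP and MFCR is more than a convenience. check_hyperparameters() enforces the joint Pitman-Yor constraint 0 <= d < 1 and s > -d, so updating discount and strength one at a time, as the old TiedResampler loop did, can trip the assertion on a perfectly valid target pair whenever the intermediate state is invalid. A small sketch of the failure mode, assuming those CCRP constraints:

#include <cassert>

struct ToyCRP {
  double discount_, strength_;
  void check_hyperparameters() const {
    assert(discount_ >= 0.0 && discount_ < 1.0);
    assert(strength_ > -discount_);
  }
  void set_discount(double d) { discount_ = d; check_hyperparameters(); }
  void set_strength(double s) { strength_ = s; check_hyperparameters(); }
  void set_hyperparameters(double d, double s) {
    discount_ = d; strength_ = s; check_hyperparameters();
  }
};

int main() {
  ToyCRP c = {0.1, -0.05};  // valid: -0.05 > -0.1
  // The target (0.5, -0.3) is also valid, but calling set_strength(-0.3)
  // first would assert, since (0.1, -0.3) violates s > -d. Setting both
  // at once never visits the bad intermediate state:
  c.set_hyperparameters(0.5, -0.3);
  return 0;
}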
&out,po::options_description const& opts) { - typedef std::vector< shared_ptr > Ds; + typedef std::vector< boost::shared_ptr > Ds; Ds const& ds=opts.options(); out << '"'; for (unsigned i=0;i make_ff(string const& ffp,bool verbose_feature_functions,char const* pre="") { +inline boost::shared_ptr make_ff(string const& ffp,bool verbose_feature_functions,char const* pre="") { string ff, param; SplitCommandAndParam(ffp, &ff, ¶m); cerr << pre << "feature: " << ff; if (param.size() > 0) cerr << " (with config parameters '" << param << "')\n"; else cerr << " (no config parameters)\n"; - shared_ptr pf = ff_registry.Create(ff, param); + boost::shared_ptr pf = ff_registry.Create(ff, param); if (!pf) exit(1); int nbyte=pf->NumBytesContext(); if (verbose_feature_functions) @@ -135,13 +134,13 @@ inline shared_ptr make_ff(string const& ffp,bool verbose_featur } #ifdef FSA_RESCORING -inline shared_ptr make_fsa_ff(string const& ffp,bool verbose_feature_functions,char const* pre="") { +inline boost::shared_ptr make_fsa_ff(string const& ffp,bool verbose_feature_functions,char const* pre="") { string ff, param; SplitCommandAndParam(ffp, &ff, ¶m); cerr << "FSA Feature: " << ff; if (param.size() > 0) cerr << " (with config parameters '" << param << "')\n"; else cerr << " (no config parameters)\n"; - shared_ptr pf = fsa_ff_registry.Create(ff, param); + boost::shared_ptr pf = fsa_ff_registry.Create(ff, param); if (!pf) exit(1); if (verbose_feature_functions) cerr<<"State is "<state_bytes()<<" bytes for "< make_fsa_ff(string const& ffp,bool verbose // passes are carried over into subsequent passes (where they may have different weights). struct RescoringPass { RescoringPass() : fid_summary(), density_prune(), beam_prune() {} - shared_ptr models; - shared_ptr inter_conf; + boost::shared_ptr models; + boost::shared_ptr inter_conf; vector ffs; - shared_ptr > weight_vector; + boost::shared_ptr > weight_vector; int fid_summary; // 0 == no summary feature double density_prune; // 0 == don't density prune double beam_prune; // 0 == don't beam prune @@ -293,15 +292,15 @@ struct DecoderImpl { po::variables_map& conf; OracleBleu oracle; string formalism; - shared_ptr translator; - shared_ptr > init_weights; // weights used with initial parse - vector > pffs; + boost::shared_ptr translator; + boost::shared_ptr > init_weights; // weights used with initial parse + vector > pffs; #ifdef FSA_RESCORING CFGOptions cfg_options; - vector > fsa_ffs; + vector > fsa_ffs; vector fsa_names; #endif - shared_ptr > rng; + boost::shared_ptr > rng; int sample_max_trans; bool aligner_mode; bool graphviz; @@ -310,7 +309,7 @@ struct DecoderImpl { bool kbest; bool unique_kbest; bool get_oracle_forest; - shared_ptr extract_file; + boost::shared_ptr extract_file; int combine_size; int sent_id; SparseVector acc_vec; // accumulate gradient @@ -622,7 +621,7 @@ DecoderImpl::DecoderImpl(po::variables_map& conf, int argc, char** argv, istream } // set up weight vectors since later phases may reuse weights from earlier phases - shared_ptr > prev_weights = init_weights; + boost::shared_ptr > prev_weights = init_weights; for (int pass = 0; pass < rescoring_passes.size(); ++pass) { RescoringPass& rp = rescoring_passes[pass]; if (!rp.weight_vector) { diff --git a/decoder/earley_composer.cc b/decoder/earley_composer.cc index b7af801a..385baf8b 100644 --- a/decoder/earley_composer.cc +++ b/decoder/earley_composer.cc @@ -16,8 +16,6 @@ #include "tdict.h" #include "hg.h" -using boost::shared_ptr; -namespace po = boost::program_options; using namespace std; using 
namespace std::tr1; @@ -111,7 +109,7 @@ struct Edge { const Edge* const active_parent; // back pointer, NULL for PREDICT items const Edge* const passive_parent; // back pointer, NULL for SCAN and PREDICT items const TargetPhraseSet* const tps; // translations - shared_ptr > features; // features from CFG rule + boost::shared_ptr > features; // features from CFG rule bool IsPassive() const { // when a rule is completed, this value will be set diff --git a/decoder/phrasetable_fst.cc b/decoder/phrasetable_fst.cc index f421e941..b3bec86b 100644 --- a/decoder/phrasetable_fst.cc +++ b/decoder/phrasetable_fst.cc @@ -9,7 +9,6 @@ #include "filelib.h" #include "tdict.h" -using boost::shared_ptr; using namespace std; TargetPhraseSet::~TargetPhraseSet() {} @@ -46,7 +45,7 @@ class TextFSTNode : public FSTNode { void ClearPassThroughTranslations(); private: vector passthroughs; - shared_ptr data; + boost::shared_ptr data; map ptr; }; diff --git a/dpmert/ces.cc b/dpmert/ces.cc index a85454da..c6cb1cdf 100644 --- a/dpmert/ces.cc +++ b/dpmert/ces.cc @@ -11,7 +11,6 @@ #include "error_surface.h" #include "ns.h" -using boost::shared_ptr; using namespace std; const bool minimize_segments = true; // if adjacent segments have equal scores, merge them @@ -22,7 +21,7 @@ void ComputeErrorSurface(const SegmentEvaluator& ss, const EvaluationMetric* metric, const Hypergraph& hg) { vector prev_trans; - const vector >& ienv = ve.GetSortedSegs(); + const vector >& ienv = ve.GetSortedSegs(); env->resize(ienv.size()); SufficientStats prev_score; // defaults to 0 int j = 0; diff --git a/dpmert/lo_test.cc b/dpmert/lo_test.cc index d9b909b8..5d90aabb 100644 --- a/dpmert/lo_test.cc +++ b/dpmert/lo_test.cc @@ -19,7 +19,6 @@ #include "line_optimizer.h" using namespace std; -using boost::shared_ptr; class OptTest : public testing::Test { protected: @@ -44,12 +43,12 @@ TEST_F(OptTest, TestCheckNaN) { } TEST_F(OptTest,TestConvexHull) { - shared_ptr a1(new MERTPoint(-1, 0)); - shared_ptr b1(new MERTPoint(1, 0)); - shared_ptr a2(new MERTPoint(-1, 1)); - shared_ptr b2(new MERTPoint(1, -1)); - vector > sa; sa.push_back(a1); sa.push_back(b1); - vector > sb; sb.push_back(a2); sb.push_back(b2); + boost::shared_ptr a1(new MERTPoint(-1, 0)); + boost::shared_ptr b1(new MERTPoint(1, 0)); + boost::shared_ptr a2(new MERTPoint(-1, 1)); + boost::shared_ptr b2(new MERTPoint(1, -1)); + vector > sa; sa.push_back(a1); sa.push_back(b1); + vector > sb; sb.push_back(a2); sb.push_back(b2); ConvexHull a(sa); cerr << a << endl; ConvexHull b(sb); diff --git a/dpmert/mert_geometry.cc b/dpmert/mert_geometry.cc index 81b25af9..d6973658 100644 --- a/dpmert/mert_geometry.cc +++ b/dpmert/mert_geometry.cc @@ -4,13 +4,12 @@ #include using namespace std; -using boost::shared_ptr; ConvexHull::ConvexHull(int i) { if (i == 0) { // do nothing - <> } else if (i == 1) { - points.push_back(shared_ptr(new MERTPoint(0, 0, 0, shared_ptr(), shared_ptr()))); + points.push_back(boost::shared_ptr(new MERTPoint(0, 0, 0, boost::shared_ptr(), boost::shared_ptr()))); assert(this->IsMultiplicativeIdentity()); } else { cerr << "Only can create ConvexHull semiring 0 and 1 with this constructor!\n"; @@ -27,7 +26,7 @@ const ConvexHull ConvexHullWeightFunction::operator()(const Hypergraph::Edge& e) ostream& operator<<(ostream& os, const ConvexHull& env) { os << '<'; - const vector >& points = env.GetSortedSegs(); + const vector >& points = env.GetSortedSegs(); for (int i = 0; i < points.size(); ++i) os << (i==0 ? 
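For context on the ConvexHull/MERTPoint types being requalified in this commit: dpmert implements Och-style MERT line search, in which each derivation's score along the search direction is a line y = m*x + b and only lines on the upper envelope of the set can be optimal at any x, so the semiring operations here only ever keep envelope points. A standalone sketch of envelope construction, assuming distinct slopes for brevity:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Line { double m, b; };  // score along the search direction: y = m*x + b

// x-coordinate where lines a and b cross (assumes a.m != b.m)
double IntersectX(const Line& a, const Line& b) {
  return (b.b - a.b) / (a.m - b.m);
}

// Keep only the lines that are maximal somewhere: sort by slope, then
// pop any line whose crossing order shows it is never on top.
std::vector<Line> UpperEnvelope(std::vector<Line> ls) {
  std::sort(ls.begin(), ls.end(),
            [](const Line& a, const Line& b) { return a.m < b.m; });
  std::vector<Line> env;
  for (const Line& l : ls) {
    while (env.size() > 1 &&
           IntersectX(env[env.size() - 2], l) <=
               IntersectX(env[env.size() - 2], env.back()))
      env.pop_back();  // env.back() is never strictly on top
    env.push_back(l);
  }
  return env;
}

int main() {
  std::vector<Line> hyps = {{-1, 0}, {0, -2}, {1, 0}};
  std::printf("%zu of %zu lines on the envelope\n",
              UpperEnvelope(hyps).size(), hyps.size());  // prints: 2 of 3
  return 0;
}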
"" : "|") << "x=" << points[i]->x << ",b=" << points[i]->b << ",m=" << points[i]->m << ",p1=" << points[i]->p1 << ",p2=" << points[i]->p2; return os << '>'; @@ -37,7 +36,7 @@ ostream& operator<<(ostream& os, const ConvexHull& env) { #ifdef ORIGINAL_MERT_IMPLEMENTATION struct SlopeCompare { - bool operator() (const shared_ptr& a, const shared_ptr& b) const { + bool operator() (const boost::shared_ptr& a, const boost::shared_ptr& b) const { return a->m < b->m; } }; @@ -93,7 +92,7 @@ const ConvexHull& ConvexHull::operator*=(const ConvexHull& other) { if (this->IsEdgeEnvelope()) { // if (other.size() > 1) // cerr << *this << " (TIMES) " << other << endl; - shared_ptr edge_parent = points[0]; + boost::shared_ptr edge_parent = points[0]; const double& edge_b = edge_parent->b; const double& edge_m = edge_parent->m; points.clear(); @@ -102,13 +101,13 @@ const ConvexHull& ConvexHull::operator*=(const ConvexHull& other) { const double m = p.m + edge_m; const double b = p.b + edge_b; const double& x = p.x; // x's don't change with * - points.push_back(shared_ptr(new MERTPoint(x, m, b, edge_parent, other.points[i]))); + points.push_back(boost::shared_ptr(new MERTPoint(x, m, b, edge_parent, other.points[i]))); assert(points.back()->p1->edge); } // if (other.size() > 1) // cerr << " = " << *this << endl; } else { - vector > new_points; + vector > new_points; int this_i = 0; int other_i = 0; const int this_size = points.size(); @@ -124,7 +123,7 @@ const ConvexHull& ConvexHull::operator*=(const ConvexHull& other) { const double m = this_point.m + other_point.m; const double b = this_point.b + other_point.b; - new_points.push_back(shared_ptr(new MERTPoint(cur_x, m, b, points[this_i], other.points[other_i]))); + new_points.push_back(boost::shared_ptr(new MERTPoint(cur_x, m, b, points[this_i], other.points[other_i]))); int comp = 0; if (this_next_val < other_next_val) comp = -1; else if (this_next_val > other_next_val) comp = 1; diff --git a/extools/extract.cc b/extools/extract.cc index f6c121b4..49542fed 100644 --- a/extools/extract.cc +++ b/extools/extract.cc @@ -131,16 +131,16 @@ lookup_and_append(const map &dict, const K &key, V &output) // phrases if there is more than one annotation. 
// TODO: support source annotation void Extract::AnnotatePhrasesWithCategoryTypes(const WordID default_cat, - const map< tuple, vector > &types, + const map< boost::tuple, vector > &types, vector* phrases) { const int num_unannotated_phrases = phrases->size(); // have to use num_unannotated_phrases since we may grow the vector for (int i = 0; i < num_unannotated_phrases; ++i) { ParallelSpan& phrase = (*phrases)[i]; vector cats; - lookup_and_append(types, make_tuple(phrase.i1, phrase.i2, phrase.j1, phrase.j2), cats); - lookup_and_append(types, make_tuple((short)-1, (short)-1, phrase.j1, phrase.j2), cats); - lookup_and_append(types, make_tuple(phrase.i1, phrase.i2, (short)-1, (short)-1), cats); + lookup_and_append(types, boost::make_tuple(phrase.i1, phrase.i2, phrase.j1, phrase.j2), cats); + lookup_and_append(types, boost::make_tuple((short)-1, (short)-1, phrase.j1, phrase.j2), cats); + lookup_and_append(types, boost::make_tuple(phrase.i1, phrase.i2, (short)-1, (short)-1), cats); if (cats.empty() && default_cat != 0) { cats = vector(1, default_cat); } diff --git a/extools/featurize_grammar.cc b/extools/featurize_grammar.cc index ebae9fdc..78175202 100644 --- a/extools/featurize_grammar.cc +++ b/extools/featurize_grammar.cc @@ -136,8 +136,8 @@ class FERegistry { public: FERegistry() {} boost::shared_ptr Create(const std::string& ffname) const { - map >::const_iterator it = reg_.find(ffname); - shared_ptr res; + map >::const_iterator it = reg_.find(ffname); + boost::shared_ptr res; if (it == reg_.end()) { cerr << "I don't know how to create feature " << ffname << endl; } else { @@ -147,7 +147,7 @@ class FERegistry { } void DisplayList(ostream* out) const { bool first = true; - for (map >::const_iterator it = reg_.begin(); + for (map >::const_iterator it = reg_.begin(); it != reg_.end(); ++it) { if (first) {first=false;} else {*out << ' ';} *out << it->first; diff --git a/extools/sentence_pair.cc b/extools/sentence_pair.cc index d5ebe48f..7d60715a 100644 --- a/extools/sentence_pair.cc +++ b/extools/sentence_pair.cc @@ -71,8 +71,7 @@ int AnnotatedParallelSentence::ReadAlignmentPoint(const char* buf, exit(1); } (*b) = 0; - //TODO: is this what is intended? parses as A && B || C && D. 
- while(ch < end && (c == 0 && (!permit_col || (permit_col && buf[ch] != ':')) || c != 0 && buf[ch] != '-')) { + while((ch < end) && (c == 0 && (!permit_col || (permit_col && buf[ch] != ':')) || c != 0 && buf[ch] != '-')) { if ((buf[ch] < '0') || (buf[ch] > '9')) { cerr << "Alignment point badly formed 4: " << string(buf, start, end-start) << endl << buf << endl << buf[ch] << endl; exit(1); @@ -151,7 +150,7 @@ void AnnotatedParallelSentence::ParseSpanLabel(const char* buf, int start, int e exit(1); } // cerr << a << " " << b << " " << string(buf,c,end-c) << endl; - span_types[make_tuple(a,b,c,d)].push_back(-TD::Convert(string(buf, ch, end-ch))); + span_types[boost::make_tuple(a,b,c,d)].push_back(-TD::Convert(string(buf, ch, end-ch))); } // INPUT FORMAT diff --git a/gi/pf/align-tl.cc b/gi/pf/align-tl.cc index cbe8c6c8..f6608f1d 100644 --- a/gi/pf/align-tl.cc +++ b/gi/pf/align-tl.cc @@ -58,7 +58,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) { } } -shared_ptr prng; +boost::shared_ptr prng; struct LexicalAlignment { unsigned char src_index; diff --git a/gi/pf/brat.cc b/gi/pf/brat.cc index c2c52760..832f22cf 100644 --- a/gi/pf/brat.cc +++ b/gi/pf/brat.cc @@ -489,7 +489,7 @@ int main(int argc, char** argv) { cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n"; return 1; } - shared_ptr prng; + boost::shared_ptr prng; if (conf.count("random_seed")) prng.reset(new MT19937(conf["random_seed"].as())); else diff --git a/gi/pf/cfg_wfst_composer.cc b/gi/pf/cfg_wfst_composer.cc index a31b5be8..20520c81 100644 --- a/gi/pf/cfg_wfst_composer.cc +++ b/gi/pf/cfg_wfst_composer.cc @@ -16,7 +16,6 @@ #include "tdict.h" #include "hg.h" -using boost::shared_ptr; namespace po = boost::program_options; using namespace std; using namespace std::tr1; @@ -114,7 +113,7 @@ struct Edge { const Edge* const active_parent; // back pointer, NULL for PREDICT items const Edge* const passive_parent; // back pointer, NULL for SCAN and PREDICT items TRulePtr tps; // translations - shared_ptr > features; // features from CFG rule + boost::shared_ptr > features; // features from CFG rule bool IsPassive() const { // when a rule is completed, this value will be set diff --git a/gi/pf/condnaive.cc b/gi/pf/condnaive.cc index 3ea88016..419731ac 100644 --- a/gi/pf/condnaive.cc +++ b/gi/pf/condnaive.cc @@ -55,7 +55,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) { } } -shared_ptr prng; +boost::shared_ptr prng; struct ModelAndData { explicit ModelAndData(ConditionalParallelSegementationModel& m, const vector >& ce, const vector >& cf, const set& ve, const set& vf) : diff --git a/gi/pf/dpnaive.cc b/gi/pf/dpnaive.cc index 469dff5c..75ccad72 100644 --- a/gi/pf/dpnaive.cc +++ b/gi/pf/dpnaive.cc @@ -55,7 +55,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) { } } -shared_ptr prng; +boost::shared_ptr prng; template struct ModelAndData { diff --git a/gi/pf/itg.cc b/gi/pf/itg.cc index a38fe672..29ec3860 100644 --- a/gi/pf/itg.cc +++ b/gi/pf/itg.cc @@ -231,7 +231,7 @@ int main(int argc, char** argv) { cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n"; return 1; } - shared_ptr prng; + boost::shared_ptr prng; if (conf.count("random_seed")) prng.reset(new MT19937(conf["random_seed"].as())); else diff --git a/gi/pf/learn_cfg.cc b/gi/pf/learn_cfg.cc index ed1772bf..44eaa162 100644 --- a/gi/pf/learn_cfg.cc +++ b/gi/pf/learn_cfg.cc @@ -24,7 +24,7 @@ using namespace std; using namespace tr1; namespace po = boost::program_options; 
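The TODO deleted from sentence_pair.cc above was asking the right question, and the answer is yes: && binds tighter than ||, so the inner test c == 0 && X || c != 0 && Y already groups as (c == 0 && X) || (c != 0 && Y), which is the intended reading; the replacement line keeps that behavior and simply parenthesizes (ch < end) for clarity. The precedence fact in miniature:

#include <cassert>

int main() {
  const bool A = true, B = true, C = true, D = false;
  const bool as_written = A && B || C && D;          // what the compiler sees
  const bool and_first  = (A && B) || (C && D);      // && before ||
  const bool left_to_right = ((A && B || C) && D);   // NOT how C++ groups it
  assert(as_written == and_first);      // both true: (true) || (false)
  assert(as_written != left_to_right);  // left-to-right grouping gives false
  return 0;
}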
-shared_ptr prng; +boost::shared_ptr prng; vector nt_vocab; vector nt_id_to_index; static unsigned kMAX_RULE_SIZE = 0; diff --git a/gi/pf/pfbrat.cc b/gi/pf/pfbrat.cc index c2c52760..832f22cf 100644 --- a/gi/pf/pfbrat.cc +++ b/gi/pf/pfbrat.cc @@ -489,7 +489,7 @@ int main(int argc, char** argv) { cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n"; return 1; } - shared_ptr prng; + boost::shared_ptr prng; if (conf.count("random_seed")) prng.reset(new MT19937(conf["random_seed"].as())); else diff --git a/gi/pf/pfdist.cc b/gi/pf/pfdist.cc index 3d578db2..a3e46064 100644 --- a/gi/pf/pfdist.cc +++ b/gi/pf/pfdist.cc @@ -23,7 +23,7 @@ using namespace std; using namespace tr1; namespace po = boost::program_options; -shared_ptr prng; +boost::shared_ptr prng; void InitCommandLine(int argc, char** argv, po::variables_map* conf) { po::options_description opts("Configuration options"); diff --git a/gi/pf/pfnaive.cc b/gi/pf/pfnaive.cc index e1a53f5c..958ec4e2 100644 --- a/gi/pf/pfnaive.cc +++ b/gi/pf/pfnaive.cc @@ -25,7 +25,7 @@ using namespace std; using namespace tr1; namespace po = boost::program_options; -shared_ptr prng; +boost::shared_ptr prng; void InitCommandLine(int argc, char** argv, po::variables_map* conf) { po::options_description opts("Configuration options"); diff --git a/gi/pf/pyp_lm.cc b/gi/pf/pyp_lm.cc index 91029688..e2b67e17 100644 --- a/gi/pf/pyp_lm.cc +++ b/gi/pf/pyp_lm.cc @@ -25,7 +25,7 @@ using namespace std; using namespace tr1; namespace po = boost::program_options; -shared_ptr prng; +boost::shared_ptr prng; void InitCommandLine(int argc, char** argv, po::variables_map* conf) { po::options_description opts("Configuration options"); diff --git a/mira/kbest_mira.cc b/mira/kbest_mira.cc index 904eba74..dc0200d6 100644 --- a/mira/kbest_mira.cc +++ b/mira/kbest_mira.cc @@ -3,10 +3,10 @@ #include #include #include +#include #include "config.h" -#include #include #include @@ -27,11 +27,10 @@ #include "sampler.h" using namespace std; -using boost::shared_ptr; namespace po = boost::program_options; bool invert_score; -boost::shared_ptr rng; +std::tr1::shared_ptr rng; void RandomPermutation(int len, vector* p_ids) { vector& ids = *p_ids; @@ -89,15 +88,15 @@ struct HypothesisInfo { }; struct GoodBadOracle { - shared_ptr good; - shared_ptr bad; + std::tr1::shared_ptr good; + std::tr1::shared_ptr bad; }; struct TrainingObserver : public DecoderObserver { TrainingObserver(const int k, const DocScorer& d, bool sf, vector* o) : ds(d), oracles(*o), kbest_size(k), sample_forest(sf) {} const DocScorer& ds; vector& oracles; - shared_ptr cur_best; + std::tr1::shared_ptr cur_best; const int kbest_size; const bool sample_forest; @@ -109,16 +108,16 @@ struct TrainingObserver : public DecoderObserver { UpdateOracles(smeta.GetSentenceID(), *hg); } - shared_ptr MakeHypothesisInfo(const SparseVector& feats, const double score) { - shared_ptr h(new HypothesisInfo); + std::tr1::shared_ptr MakeHypothesisInfo(const SparseVector& feats, const double score) { + std::tr1::shared_ptr h(new HypothesisInfo); h->features = feats; h->mt_metric = score; return h; } void UpdateOracles(int sent_id, const Hypergraph& forest) { - shared_ptr& cur_good = oracles[sent_id].good; - shared_ptr& cur_bad = oracles[sent_id].bad; + std::tr1::shared_ptr& cur_good = oracles[sent_id].good; + std::tr1::shared_ptr& cur_bad = oracles[sent_id].bad; cur_bad.reset(); // TODO get rid of?? 
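The edits marching through this commit, every bare shared_ptr becoming boost::shared_ptr (or std::tr1::shared_ptr for the samplers) and several using boost::shared_ptr; declarations deleted, are the substance of making the tree C++11-compatible: in C++11 <memory> declares std::shared_ptr, so any file combining using namespace std; with an unqualified shared_ptr suddenly refers to an ambiguous name. A minimal reproduction, with the offending line left commented out:

#include <memory>
#include <boost/shared_ptr.hpp>

using namespace std;        // cdec translation units do this near the top
using boost::shared_ptr;    // ...and several also did this (now removed)

int main() {
  // shared_ptr<int> p(new int(42));  // C++11: error, 'shared_ptr' is
  //                                  // ambiguous (boost:: vs std::)
  boost::shared_ptr<int> q(new int(42));  // the commit's fix: qualify
  return (*q == 42) ? 0 : 1;
}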
if (sample_forest) { diff --git a/mteval/ns.cc b/mteval/ns.cc index 788f809a..8d354677 100644 --- a/mteval/ns.cc +++ b/mteval/ns.cc @@ -14,7 +14,6 @@ #include "stringlib.h" using namespace std; -using boost::shared_ptr; map EvaluationMetric::instances_; @@ -35,8 +34,8 @@ struct DefaultSegmentEvaluator : public SegmentEvaluator { const EvaluationMetric* em_; }; -shared_ptr EvaluationMetric::CreateSegmentEvaluator(const vector >& refs) const { - return shared_ptr(new DefaultSegmentEvaluator(refs, this)); +boost::shared_ptr EvaluationMetric::CreateSegmentEvaluator(const vector >& refs) const { + return boost::shared_ptr(new DefaultSegmentEvaluator(refs, this)); } #define MAX_SS_VECTOR_SIZE 50 @@ -184,8 +183,8 @@ template struct BleuMetric : public EvaluationMetric { BleuMetric() : EvaluationMetric(BrevityType == IBM ? "IBM_BLEU" : (BrevityType == Koehn ? "KOEHN_BLEU" : "NIST_BLEU")) {} unsigned SufficientStatisticsVectorSize() const { return N*2 + 2; } - shared_ptr CreateSegmentEvaluator(const vector >& refs) const { - return shared_ptr(new BleuSegmentEvaluator(refs, this)); + boost::shared_ptr CreateSegmentEvaluator(const vector >& refs) const { + return boost::shared_ptr(new BleuSegmentEvaluator(refs, this)); } float ComputeBreakdown(const SufficientStats& stats, float* bp, vector* out) const { if (out) { out->clear(); } diff --git a/phrasinator/gibbs_train_plm.cc b/phrasinator/gibbs_train_plm.cc index 3b99e1b6..86fd7865 100644 --- a/phrasinator/gibbs_train_plm.cc +++ b/phrasinator/gibbs_train_plm.cc @@ -269,7 +269,7 @@ struct UniphraseLM { int main(int argc, char** argv) { po::variables_map conf; InitCommandLine(argc, argv, &conf); - shared_ptr prng; + boost::shared_ptr prng; if (conf.count("random_seed")) prng.reset(new MT19937(conf["random_seed"].as())); else diff --git a/phrasinator/gibbs_train_plm.notables.cc b/phrasinator/gibbs_train_plm.notables.cc index 4b431b90..9dca9e8d 100644 --- a/phrasinator/gibbs_train_plm.notables.cc +++ b/phrasinator/gibbs_train_plm.notables.cc @@ -293,7 +293,7 @@ struct UniphraseLM { int main(int argc, char** argv) { po::variables_map conf; InitCommandLine(argc, argv, &conf); - shared_ptr prng; + boost::shared_ptr prng; if (conf.count("random_seed")) prng.reset(new MT19937(conf["random_seed"].as())); else diff --git a/training/mpi_batch_optimize.cc b/training/mpi_batch_optimize.cc index 046e921c..9f12dba9 100644 --- a/training/mpi_batch_optimize.cc +++ b/training/mpi_batch_optimize.cc @@ -29,7 +29,6 @@ namespace mpi = boost::mpi; #include "sparse_vector.h" using namespace std; -using boost::shared_ptr; namespace po = boost::program_options; bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { @@ -270,7 +269,7 @@ int main(int argc, char** argv) { } Weights::InitFromFile(conf["means"].as(), &means); } - shared_ptr o; + boost::shared_ptr o; if (rank == 0) { const string omethod = conf["optimization_method"].as(); if (omethod == "rprop") diff --git a/training/mr_optimize_reduce.cc b/training/mr_optimize_reduce.cc index 15e28fa1..461e6b5f 100644 --- a/training/mr_optimize_reduce.cc +++ b/training/mr_optimize_reduce.cc @@ -15,7 +15,6 @@ #include "sparse_vector.h" using namespace std; -using boost::shared_ptr; namespace po = boost::program_options; void SanityCheck(const vector& w) { @@ -102,7 +101,7 @@ int main(int argc, char** argv) { } Weights::InitFromFile(conf["means"].as(), &means); } - shared_ptr o; + boost::shared_ptr o; const string omethod = conf["optimization_method"].as(); if (omethod == "rprop") o.reset(new 
RPropOptimizer(num_feats)); // TODO add configuration diff --git a/training/optimize_test.cc b/training/optimize_test.cc index fe7ca70f..bff2ca03 100644 --- a/training/optimize_test.cc +++ b/training/optimize_test.cc @@ -102,7 +102,7 @@ void TestOnline() { size_t N = 20; double C = 1.0; double eta0 = 0.2; - shared_ptr r(new ExponentialDecayLearningRate(N, eta0, 0.85)); + std::tr1::shared_ptr r(new ExponentialDecayLearningRate(N, eta0, 0.85)); //shared_ptr r(new StandardLearningRate(N, eta0)); CumulativeL1OnlineOptimizer opt(r, N, C, std::vector()); assert(r->eta(10) < r->eta(1)); diff --git a/utils/atools.cc b/utils/atools.cc index c0a91731..ba56dd6c 100644 --- a/utils/atools.cc +++ b/utils/atools.cc @@ -12,7 +12,6 @@ namespace po = boost::program_options; using namespace std; -using boost::shared_ptr; struct Command { virtual ~Command() {} @@ -348,10 +347,10 @@ int main(int argc, char **argv) { } } if (line1.empty() && !*in1) break; - shared_ptr > out(new Array2D); - shared_ptr > a1 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line1); + boost::shared_ptr > out(new Array2D); + boost::shared_ptr > a1 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line1); if (in2) { - shared_ptr > a2 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line2); + boost::shared_ptr > a2 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line2); cmd.Apply(*a1, *a2, out.get()); } else { Array2D dummy; -- cgit v1.2.3 From 225ccd7b5a242e2a4381b71e4c794d82f560f8a3 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Fri, 23 Mar 2012 17:47:31 -0400 Subject: fix includes --- klm/util/file.cc | 1 + klm/util/mmap.cc | 1 + 2 files changed, 2 insertions(+) diff --git a/klm/util/file.cc b/klm/util/file.cc index 176737fa..de206bc8 100644 --- a/klm/util/file.cc +++ b/klm/util/file.cc @@ -10,6 +10,7 @@ #include #include #include +#include #if defined(_WIN32) || defined(_WIN64) #include diff --git a/klm/util/mmap.cc b/klm/util/mmap.cc index 3b1c58b8..2db35b56 100644 --- a/klm/util/mmap.cc +++ b/klm/util/mmap.cc @@ -14,6 +14,7 @@ #include #include #include +#include #if defined(_WIN32) || defined(_WIN64) #include -- cgit v1.2.3 From 0c7e078d14dd7078ec4a5b3e77007609aec5e54c Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Fri, 23 Mar 2012 17:48:38 -0400 Subject: pf test --- gi/pf/mh_test.cc | 148 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ gi/pf/pf_test.cc | 148 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ utils/ccrp.h | 6 ++- 3 files changed, 301 insertions(+), 1 deletion(-) create mode 100644 gi/pf/mh_test.cc create mode 100644 gi/pf/pf_test.cc diff --git a/gi/pf/mh_test.cc b/gi/pf/mh_test.cc new file mode 100644 index 00000000..296e7285 --- /dev/null +++ b/gi/pf/mh_test.cc @@ -0,0 +1,148 @@ +#include "ccrp.h" + +#include +#include + +#include "tdict.h" +#include "transliterations.h" + +using namespace std; + +MT19937 rng; + +static bool verbose = false; + +struct Model { + + Model() : bp(), base(0.2, 0.6) , ccrps(5, CCRP(0.8, 0.5)) {} + + double p0(int x) const { + assert(x > 0); + assert(x < 5); + return 1.0/4.0; + } + + double llh() const { + double lh = bp + base.log_crp_prob(); + for (int ctx = 1; ctx < 5; ++ctx) + lh += ccrps[ctx].log_crp_prob(); + return lh; + } + + double prob(int ctx, int x) const { + assert(ctx > 0 && ctx < 5); + return ccrps[ctx].prob(x, base.prob(x, p0(x))); + } + + void increment(int ctx, int x) { + assert(ctx > 0 && ctx < 5); + if (ccrps[ctx].increment(x, base.prob(x, p0(x)), &rng)) { + if (base.increment(x, p0(x), &rng)) { + bp += log(1.0 / 4.0); + } + } + } + + // this is just a biased 
estimate + double est_base_prob(int x) { + return (x + 1) * x / 40.0; + } + + void increment_is(int ctx, int x) { + assert(ctx > 0 && ctx < 5); + SampleSet ss; + const int PARTICLES = 25; + vector > s1s(PARTICLES, CCRP(0.5,0.5)); + vector > sbs(PARTICLES, CCRP(0.5,0.5)); + vector sp0s(PARTICLES); + + CCRP s1 = ccrps[ctx]; + CCRP sb = base; + double sp0 = bp; + for (int pp = 0; pp < PARTICLES; ++pp) { + if (pp > 0) { + ccrps[ctx] = s1; + base = sb; + bp = sp0; + } + + double q = 1; + double gamma = 1; + double est_p = est_base_prob(x); + //base.prob(x, p0(x)) + rng.next() * 0.1; + if (ccrps[ctx].increment(x, est_p, &rng, &q)) { + gamma = q * base.prob(x, p0(x)); + q *= est_p; + if (verbose) cerr << "(DP-base draw) "; + double qq = -1; + if (base.increment(x, p0(x), &rng, &qq)) { + if (verbose) cerr << "(G0 draw) "; + bp += log(p0(x)); + qq *= p0(x); + } + } else { gamma = q; } + double w = gamma / q; + if (verbose) + cerr << "gamma=" << gamma << " q=" << q << "\tw=" << w << endl; + ss.add(w); + s1s[pp] = ccrps[ctx]; + sbs[pp] = base; + sp0s[pp] = bp; + } + int ps = rng.SelectSample(ss); + ccrps[ctx] = s1s[ps]; + base = sbs[ps]; + bp = sp0s[ps]; + if (verbose) { + cerr << "SELECTED: " << ps << endl; + static int cc = 0; cc++; if (cc ==10) exit(1); + } + } + + void decrement(int ctx, int x) { + assert(ctx > 0 && ctx < 5); + if (ccrps[ctx].decrement(x, &rng)) { + if (base.decrement(x, &rng)) { + bp -= log(p0(x)); + } + } + } + + double bp; + CCRP base; + vector > ccrps; + +}; + +int main(int argc, char** argv) { + if (argc > 1) { verbose = true; } + vector counts(15, 0); + vector tcounts(15, 0); + int points[] = {1,2, 2,2, 3,2, 4,1, 3, 4, 3, 3, 2, 3, 4, 1, 4, 1, 3, 2, 1, 3, 1, 4, 0, 0}; + double tlh = 0; + double tt = 0; + for (int n = 0; n < 1000; ++n) { + if (n % 10 == 0) cerr << '.'; + if ((n+1) % 400 == 0) cerr << " [" << (n+1) << "]\n"; + Model m; + for (int *x = points; *x; x += 2) + m.increment(x[0], x[1]); + + for (int j = 0; j < 24; ++j) { + for (int *x = points; *x; x += 2) { + if (rng.next() < 0.8) { + m.decrement(x[0], x[1]); + m.increment_is(x[0], x[1]); + } + } + } + counts[m.base.num_customers()]++; + tcounts[m.base.num_tables()]++; + tlh += m.llh(); + tt += 1.0; + } + cerr << "mean LLH = " << (tlh / tt) << endl; + for (int i = 0; i < 15; ++i) + cerr << i << ": " << (counts[i] / tt) << "\t" << (tcounts[i] / tt) << endl; +} + diff --git a/gi/pf/pf_test.cc b/gi/pf/pf_test.cc new file mode 100644 index 00000000..296e7285 --- /dev/null +++ b/gi/pf/pf_test.cc @@ -0,0 +1,148 @@ +#include "ccrp.h" + +#include +#include + +#include "tdict.h" +#include "transliterations.h" + +using namespace std; + +MT19937 rng; + +static bool verbose = false; + +struct Model { + + Model() : bp(), base(0.2, 0.6) , ccrps(5, CCRP(0.8, 0.5)) {} + + double p0(int x) const { + assert(x > 0); + assert(x < 5); + return 1.0/4.0; + } + + double llh() const { + double lh = bp + base.log_crp_prob(); + for (int ctx = 1; ctx < 5; ++ctx) + lh += ccrps[ctx].log_crp_prob(); + return lh; + } + + double prob(int ctx, int x) const { + assert(ctx > 0 && ctx < 5); + return ccrps[ctx].prob(x, base.prob(x, p0(x))); + } + + void increment(int ctx, int x) { + assert(ctx > 0 && ctx < 5); + if (ccrps[ctx].increment(x, base.prob(x, p0(x)), &rng)) { + if (base.increment(x, p0(x), &rng)) { + bp += log(1.0 / 4.0); + } + } + } + + // this is just a biased estimate + double est_base_prob(int x) { + return (x + 1) * x / 40.0; + } + + void increment_is(int ctx, int x) { + assert(ctx > 0 && ctx < 5); + SampleSet ss; + const int PARTICLES = 
25; + vector > s1s(PARTICLES, CCRP(0.5,0.5)); + vector > sbs(PARTICLES, CCRP(0.5,0.5)); + vector sp0s(PARTICLES); + + CCRP s1 = ccrps[ctx]; + CCRP sb = base; + double sp0 = bp; + for (int pp = 0; pp < PARTICLES; ++pp) { + if (pp > 0) { + ccrps[ctx] = s1; + base = sb; + bp = sp0; + } + + double q = 1; + double gamma = 1; + double est_p = est_base_prob(x); + //base.prob(x, p0(x)) + rng.next() * 0.1; + if (ccrps[ctx].increment(x, est_p, &rng, &q)) { + gamma = q * base.prob(x, p0(x)); + q *= est_p; + if (verbose) cerr << "(DP-base draw) "; + double qq = -1; + if (base.increment(x, p0(x), &rng, &qq)) { + if (verbose) cerr << "(G0 draw) "; + bp += log(p0(x)); + qq *= p0(x); + } + } else { gamma = q; } + double w = gamma / q; + if (verbose) + cerr << "gamma=" << gamma << " q=" << q << "\tw=" << w << endl; + ss.add(w); + s1s[pp] = ccrps[ctx]; + sbs[pp] = base; + sp0s[pp] = bp; + } + int ps = rng.SelectSample(ss); + ccrps[ctx] = s1s[ps]; + base = sbs[ps]; + bp = sp0s[ps]; + if (verbose) { + cerr << "SELECTED: " << ps << endl; + static int cc = 0; cc++; if (cc ==10) exit(1); + } + } + + void decrement(int ctx, int x) { + assert(ctx > 0 && ctx < 5); + if (ccrps[ctx].decrement(x, &rng)) { + if (base.decrement(x, &rng)) { + bp -= log(p0(x)); + } + } + } + + double bp; + CCRP base; + vector > ccrps; + +}; + +int main(int argc, char** argv) { + if (argc > 1) { verbose = true; } + vector counts(15, 0); + vector tcounts(15, 0); + int points[] = {1,2, 2,2, 3,2, 4,1, 3, 4, 3, 3, 2, 3, 4, 1, 4, 1, 3, 2, 1, 3, 1, 4, 0, 0}; + double tlh = 0; + double tt = 0; + for (int n = 0; n < 1000; ++n) { + if (n % 10 == 0) cerr << '.'; + if ((n+1) % 400 == 0) cerr << " [" << (n+1) << "]\n"; + Model m; + for (int *x = points; *x; x += 2) + m.increment(x[0], x[1]); + + for (int j = 0; j < 24; ++j) { + for (int *x = points; *x; x += 2) { + if (rng.next() < 0.8) { + m.decrement(x[0], x[1]); + m.increment_is(x[0], x[1]); + } + } + } + counts[m.base.num_customers()]++; + tcounts[m.base.num_tables()]++; + tlh += m.llh(); + tt += 1.0; + } + cerr << "mean LLH = " << (tlh / tt) << endl; + for (int i = 0; i < 15; ++i) + cerr << i << ": " << (counts[i] / tt) << "\t" << (tcounts[i] / tt) << endl; +} + diff --git a/utils/ccrp.h b/utils/ccrp.h index 390d4994..8635b422 100644 --- a/utils/ccrp.h +++ b/utils/ccrp.h @@ -97,8 +97,10 @@ class CCRP { } // returns +1 or 0 indicating whether a new table was opened + // p = probability with which the particular table was selected + // excluding p0 template - int increment(const Dish& dish, const T& p0, MT19937* rng) { + int increment(const Dish& dish, const T& p0, MT19937* rng, T* p = NULL) { DishLocations& loc = dish_locs_[dish]; bool share_table = false; if (loc.total_dish_count_) { @@ -112,6 +114,7 @@ class CCRP { ti != loc.table_counts_.end(); ++ti) { r -= (*ti - discount_); if (r <= 0.0) { + if (p) { *p = T(*ti - discount_) / T(strength_ + num_customers_); } ++(*ti); break; } @@ -123,6 +126,7 @@ class CCRP { } } else { loc.table_counts_.push_back(1u); + if (p) { *p = T(strength_ + discount_ * num_tables_) / T(strength_ + num_customers_); } ++num_tables_; } ++loc.total_dish_count_; -- cgit v1.2.3 From b6eede632af4fa58a6f5325ee0d059c02a898b9f Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Sat, 24 Mar 2012 23:04:46 -0400 Subject: rename aligner, add support for distinguishing translation / transliteration --- decoder/aligner.cc | 4 +- decoder/ff_wordalign.cc | 1 - mteval/aer_scorer.cc | 6 +-- utils/Makefile.am | 2 +- utils/alignment_io.cc | 97 ++++++++++++++++++++++++++++++++++++++++++++++ 
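mh_test.cc and pf_test.cc (identical at this point) both exercise the same move, and the ccrp.h change exists to support it: increment() can now report the probability, excluding the base factor, of the seating decision it actually made, so a caller that proposed from a deliberately biased estimate can weight each particle by w = gamma/q and resample among particles in proportion to w, which is what the SampleSet/SelectSample calls above do. The weighting logic in isolation, with a hypothetical two-outcome target standing in for the CRP hierarchy:

#include <cstdio>
#include <random>
#include <vector>

int main() {
  std::mt19937 rng(1234);
  const double p_true[2] = {0.9, 0.1};   // target distribution gamma
  const double q[2] = {0.5, 0.5};        // deliberately biased proposal
  const int PARTICLES = 25;

  std::vector<int> state(PARTICLES);
  std::vector<double> w(PARTICLES);
  std::uniform_real_distribution<double> u(0, 1);
  for (int p = 0; p < PARTICLES; ++p) {
    state[p] = u(rng) < q[1];               // propose from q
    w[p] = p_true[state[p]] / q[state[p]];  // importance weight gamma/q
  }
  // resample one particle in proportion to its weight (SampleSet's job)
  std::discrete_distribution<int> pick(w.begin(), w.end());
  std::printf("kept state %d\n", state[pick(rng)]);
  return 0;
}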
utils/alignment_io.h | 42 ++++++++++++++++++++ utils/alignment_pharaoh.cc | 77 ------------------------------------ utils/alignment_pharaoh.h | 14 ------- utils/atools.cc | 8 ++-- 9 files changed, 149 insertions(+), 102 deletions(-) create mode 100644 utils/alignment_io.cc create mode 100644 utils/alignment_io.h delete mode 100644 utils/alignment_pharaoh.cc delete mode 100644 utils/alignment_pharaoh.h diff --git a/decoder/aligner.cc b/decoder/aligner.cc index 53e059fb..232e022a 100644 --- a/decoder/aligner.cc +++ b/decoder/aligner.cc @@ -11,7 +11,7 @@ #include "sentence_metadata.h" #include "inside_outside.h" #include "viterbi.h" -#include "alignment_pharaoh.h" +#include "alignment_io.h" using namespace std; @@ -300,7 +300,7 @@ void AlignerTools::WriteAlignment(const Lattice& src_lattice, cerr << grid << endl; } (*out) << TD::GetString(src_sent) << " ||| " << TD::GetString(trg_sent) << " ||| "; - AlignmentPharaoh::SerializePharaohFormat(grid, out); + AlignmentIO::SerializePharaohFormat(grid, out); } }; diff --git a/decoder/ff_wordalign.cc b/decoder/ff_wordalign.cc index 9e7c618e..decdf9bc 100644 --- a/decoder/ff_wordalign.cc +++ b/decoder/ff_wordalign.cc @@ -15,7 +15,6 @@ #include "factored_lexicon_helper.h" #include "verbose.h" -#include "alignment_pharaoh.h" #include "stringlib.h" #include "sentence_metadata.h" #include "hg.h" diff --git a/mteval/aer_scorer.cc b/mteval/aer_scorer.cc index edd4390f..ae3192d4 100644 --- a/mteval/aer_scorer.cc +++ b/mteval/aer_scorer.cc @@ -5,7 +5,7 @@ #include #include "tdict.h" -#include "alignment_pharaoh.h" +#include "alignment_io.h" using namespace std; @@ -85,7 +85,7 @@ AERScorer::AERScorer(const vector >& refs, const string& src) : s cerr << "AERScorer can only take a single reference!\n"; abort(); } - ref_ = AlignmentPharaoh::ReadPharaohAlignmentGrid(TD::GetString(refs.front())); + ref_ = AlignmentIO::ReadPharaohAlignmentGrid(TD::GetString(refs.front())); } static inline bool Safe(const Array2D& a, int i, int j) { @@ -101,7 +101,7 @@ ScoreP AERScorer::ScoreCCandidate(const vector& shyp) const { ScoreP AERScorer::ScoreCandidate(const vector& shyp) const { boost::shared_ptr > hyp = - AlignmentPharaoh::ReadPharaohAlignmentGrid(TD::GetString(shyp)); + AlignmentIO::ReadPharaohAlignmentGrid(TD::GetString(shyp)); int m = 0; int r = 0; diff --git a/utils/Makefile.am b/utils/Makefile.am index 3ea21835..2fc6ae21 100644 --- a/utils/Makefile.am +++ b/utils/Makefile.am @@ -23,7 +23,7 @@ atools_SOURCES = atools.cc noinst_LIBRARIES = libutils.a libutils_a_SOURCES = \ - alignment_pharaoh.cc \ + alignment_io.cc \ b64tools.cc \ corpus_tools.cc \ dict.cc \ diff --git a/utils/alignment_io.cc b/utils/alignment_io.cc new file mode 100644 index 00000000..1d923f7f --- /dev/null +++ b/utils/alignment_io.cc @@ -0,0 +1,97 @@ +#include "utils/alignment_io.h" + +using namespace std; + +static bool is_digit(char x) { return x >= '0' && x <= '9'; } + +boost::shared_ptr > AlignmentIO::ReadPharaohAlignmentGrid(const string& al) { + int max_x = 0; + int max_y = 0; + int i = 0; + size_t pos = al.rfind(" ||| "); + if (pos != string::npos) { i = pos + 5; } + while (i < al.size()) { + if (al[i] == '\n' || al[i] == '\r') break; + int x = 0; + while(i < al.size() && is_digit(al[i])) { + x *= 10; + x += al[i] - '0'; + ++i; + } + if (x > max_x) max_x = x; + assert(i < al.size()); + if(al[i] != '-') { + cerr << "BAD ALIGNMENT: " << al << endl; + abort(); + } + ++i; + int y = 0; + while(i < al.size() && is_digit(al[i])) { + y *= 10; + y += al[i] - '0'; + ++i; + } + if (y > max_y) max_y = y; + 
while(i < al.size() && al[i] == ' ') { ++i; } + } + + boost::shared_ptr > grid(new Array2D(max_x + 1, max_y + 1)); + i = 0; + if (pos != string::npos) { i = pos + 5; } + while (i < al.size()) { + if (al[i] == '\n' || al[i] == '\r') break; + int x = 0; + while(i < al.size() && is_digit(al[i])) { + x *= 10; + x += al[i] - '0'; + ++i; + } + assert(i < al.size()); + assert(al[i] == '-'); + ++i; + int y = 0; + while(i < al.size() && is_digit(al[i])) { + y *= 10; + y += al[i] - '0'; + ++i; + } + (*grid)(x, y) = true; + while(i < al.size() && al[i] == ' ') { ++i; } + } + // cerr << *grid << endl; + return grid; +} + +void AlignmentIO::SerializePharaohFormat(const Array2D& alignment, ostream* o) { + ostream& out = *o; + bool need_space = false; + for (int i = 0; i < alignment.width(); ++i) + for (int j = 0; j < alignment.height(); ++j) + if (alignment(i,j)) { + if (need_space) out << ' '; else need_space = true; + out << i << '-' << j; + } + out << endl; +} + +void AlignmentIO::SerializeTypedAlignment(const Array2D& alignment, ostream* o) { + ostream& out = *o; + bool need_space = false; + for (int i = 0; i < alignment.width(); ++i) + for (int j = 0; j < alignment.height(); ++j) { + const AlignmentType& aij = alignment(i,j); + if (aij != kNONE) { + if (need_space) out << ' '; else need_space = true; + if (aij == kTRANSLATION) {} + else if (aij == kTRANSLITERATION) { + out << 'T' << ':'; + } else { + cerr << "\nUnexpected alignment point type: " << static_cast(aij) << endl; + abort(); + } + out << i << '-' << j; + } + } + out << endl; +} + diff --git a/utils/alignment_io.h b/utils/alignment_io.h new file mode 100644 index 00000000..36bcecd7 --- /dev/null +++ b/utils/alignment_io.h @@ -0,0 +1,42 @@ +#ifndef _ALIGNMENT_IO_H_ +#define _ALIGNMENT_IO_H_ + +#include +#include +#include +#include "array2d.h" + +struct AlignmentIO { + enum AlignmentType { kNONE = 0, kTRANSLATION = 1, kTRANSLITERATION = 2 }; + + static boost::shared_ptr > ReadPharaohAlignmentGrid(const std::string& al); + static void SerializePharaohFormat(const Array2D& alignment, std::ostream* out); + static void SerializeTypedAlignment(const Array2D& alignment, std::ostream* out); +}; + +inline std::ostream& operator<<(std::ostream& os, const Array2D& m) { + os << ' '; + for (int j=0; j - -using namespace std; - -static bool is_digit(char x) { return x >= '0' && x <= '9'; } - -boost::shared_ptr > AlignmentPharaoh::ReadPharaohAlignmentGrid(const string& al) { - int max_x = 0; - int max_y = 0; - int i = 0; - size_t pos = al.rfind(" ||| "); - if (pos != string::npos) { i = pos + 5; } - while (i < al.size()) { - if (al[i] == '\n' || al[i] == '\r') break; - int x = 0; - while(i < al.size() && is_digit(al[i])) { - x *= 10; - x += al[i] - '0'; - ++i; - } - if (x > max_x) max_x = x; - assert(i < al.size()); - if(al[i] != '-') { - cerr << "BAD ALIGNMENT: " << al << endl; - abort(); - } - ++i; - int y = 0; - while(i < al.size() && is_digit(al[i])) { - y *= 10; - y += al[i] - '0'; - ++i; - } - if (y > max_y) max_y = y; - while(i < al.size() && al[i] == ' ') { ++i; } - } - - boost::shared_ptr > grid(new Array2D(max_x + 1, max_y + 1)); - i = 0; - if (pos != string::npos) { i = pos + 5; } - while (i < al.size()) { - if (al[i] == '\n' || al[i] == '\r') break; - int x = 0; - while(i < al.size() && is_digit(al[i])) { - x *= 10; - x += al[i] - '0'; - ++i; - } - assert(i < al.size()); - assert(al[i] == '-'); - ++i; - int y = 0; - while(i < al.size() && is_digit(al[i])) { - y *= 10; - y += al[i] - '0'; - ++i; - } - (*grid)(x, y) = true; - while(i < 
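AlignmentIO keeps the standard Pharaoh i-j notation for ordinary links and, in line with this commit's subject, writes transliteration links with a T: prefix, so a pair in which source word 0 translates target word 0 and source word 1 is transliterated as target word 1 serializes as "0-0 T:1-1". A freestanding sketch of the writer's contract:

#include <iostream>
#include <vector>

enum AlignmentType { kNONE = 0, kTRANSLATION = 1, kTRANSLITERATION = 2 };

// Serialize a dense grid in the extended format above: plain "i-j" for
// translation links, "T:i-j" for transliteration links.
void Serialize(const std::vector<std::vector<AlignmentType> >& g, std::ostream& out) {
  bool sp = false;
  for (size_t i = 0; i < g.size(); ++i)
    for (size_t j = 0; j < g[i].size(); ++j) {
      if (g[i][j] == kNONE) continue;
      if (sp) out << ' '; else sp = true;
      if (g[i][j] == kTRANSLITERATION) out << "T:";
      out << i << '-' << j;
    }
  out << std::endl;
}

int main() {
  std::vector<std::vector<AlignmentType> > g(2, std::vector<AlignmentType>(2, kNONE));
  g[0][0] = kTRANSLATION;       // source 0 translates target 0
  g[1][1] = kTRANSLITERATION;   // source 1 transliterates to target 1
  Serialize(g, std::cout);      // prints: 0-0 T:1-1
  return 0;
}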
al.size() && al[i] == ' ') { ++i; } - } - // cerr << *grid << endl; - return grid; -} - -void AlignmentPharaoh::SerializePharaohFormat(const Array2D& alignment, ostream* out) { - bool need_space = false; - for (int i = 0; i < alignment.width(); ++i) - for (int j = 0; j < alignment.height(); ++j) - if (alignment(i,j)) { - if (need_space) (*out) << ' '; else need_space = true; - (*out) << i << '-' << j; - } - (*out) << endl; -} - diff --git a/utils/alignment_pharaoh.h b/utils/alignment_pharaoh.h deleted file mode 100644 index d111c8bf..00000000 --- a/utils/alignment_pharaoh.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _PHARAOH_ALIGNMENT_H_ -#define _PHARAOH_ALIGNMENT_H_ - -#include -#include -#include -#include "array2d.h" - -struct AlignmentPharaoh { - static boost::shared_ptr > ReadPharaohAlignmentGrid(const std::string& al); - static void SerializePharaohFormat(const Array2D& alignment, std::ostream* out); -}; - -#endif diff --git a/utils/atools.cc b/utils/atools.cc index ba56dd6c..bce7822e 100644 --- a/utils/atools.cc +++ b/utils/atools.cc @@ -8,7 +8,7 @@ #include #include "filelib.h" -#include "alignment_pharaoh.h" +#include "alignment_io.h" namespace po = boost::program_options; using namespace std; @@ -348,9 +348,9 @@ int main(int argc, char **argv) { } if (line1.empty() && !*in1) break; boost::shared_ptr > out(new Array2D); - boost::shared_ptr > a1 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line1); + boost::shared_ptr > a1 = AlignmentIO::ReadPharaohAlignmentGrid(line1); if (in2) { - boost::shared_ptr > a2 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line2); + boost::shared_ptr > a2 = AlignmentIO::ReadPharaohAlignmentGrid(line2); cmd.Apply(*a1, *a2, out.get()); } else { Array2D dummy; @@ -358,7 +358,7 @@ int main(int argc, char **argv) { } if (cmd.Result() == 1) { - AlignmentPharaoh::SerializePharaohFormat(*out, &cout); + AlignmentIO::SerializePharaohFormat(*out, &cout); } } if (cmd.Result() == 2) -- cgit v1.2.3 From bf4a7606151301dba49265e91c289f2caab2b7ec Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Mon, 2 Apr 2012 23:48:19 -0400 Subject: fix bug in lattices with OOVs --- decoder/grammar.cc | 24 +++++++++++--------- decoder/grammar.h | 2 -- rst_parser/Makefile.am | 16 +++++++++++++ rst_parser/arc_factored.h | 58 +++++++++++++++++++++++++++++++++++++++++++++++ rst_parser/mst_train.cc | 11 +++++++++ rst_parser/rst.cc | 2 ++ rst_parser/rst.h | 7 ++++++ 7 files changed, 107 insertions(+), 13 deletions(-) create mode 100644 rst_parser/Makefile.am create mode 100644 rst_parser/arc_factored.h create mode 100644 rst_parser/mst_train.cc create mode 100644 rst_parser/rst.cc create mode 100644 rst_parser/rst.h diff --git a/decoder/grammar.cc b/decoder/grammar.cc index 9e4065a6..714390f0 100644 --- a/decoder/grammar.cc +++ b/decoder/grammar.cc @@ -3,12 +3,14 @@ #include #include #include +#include #include "rule_lexer.h" #include "filelib.h" #include "tdict.h" using namespace std; +using namespace std::tr1; const vector Grammar::NO_RULES; @@ -148,24 +150,24 @@ bool GlueGrammar::HasRuleForSpan(int i, int /* j */, int /* distance */) const { return (i == 0); } -PassThroughGrammar::PassThroughGrammar(const Lattice& input, const string& cat, const unsigned int ctf_level) : - has_rule_(input.size() + 1) { +PassThroughGrammar::PassThroughGrammar(const Lattice& input, const string& cat, const unsigned int ctf_level) { + unordered_set ss; for (int i = 0; i < input.size(); ++i) { const vector& alts = input[i]; for (int k = 0; k < alts.size(); ++k) { const int j = alts[k].dist2next + i; - 
has_rule_[i].insert(j); const string& src = TD::Convert(alts[k].label); - TRulePtr pt(new TRule("[" + cat + "] ||| " + src + " ||| " + src + " ||| PassThrough=1")); - pt->a_.push_back(AlignmentPoint(0,0)); - AddRule(pt); - RefineRule(pt, ctf_level); + if (ss.count(alts[k].label) == 0) { + TRulePtr pt(new TRule("[" + cat + "] ||| " + src + " ||| " + src + " ||| PassThrough=1")); + pt->a_.push_back(AlignmentPoint(0,0)); + AddRule(pt); + RefineRule(pt, ctf_level); + ss.insert(alts[k].label); + } } } } -bool PassThroughGrammar::HasRuleForSpan(int i, int j, int /* distance */) const { - const set& hr = has_rule_[i]; - if (i == j) { return !hr.empty(); } - return (hr.find(j) != hr.end()); +bool PassThroughGrammar::HasRuleForSpan(int, int, int distance) const { + return (distance < 2); } diff --git a/decoder/grammar.h b/decoder/grammar.h index f5d00817..e6a15a69 100644 --- a/decoder/grammar.h +++ b/decoder/grammar.h @@ -91,8 +91,6 @@ struct GlueGrammar : public TextGrammar { struct PassThroughGrammar : public TextGrammar { PassThroughGrammar(const Lattice& input, const std::string& cat, const unsigned int ctf_level=0); virtual bool HasRuleForSpan(int i, int j, int distance) const; - private: - std::vector > has_rule_; // index by [i][j] }; void RefineRule(TRulePtr pt, const unsigned int ctf_level); diff --git a/rst_parser/Makefile.am b/rst_parser/Makefile.am new file mode 100644 index 00000000..fef1c1a2 --- /dev/null +++ b/rst_parser/Makefile.am @@ -0,0 +1,16 @@ +bin_PROGRAMS = \ + mst_train + +noinst_PROGRAMS = \ + rst_test + +TESTS = rst_test + +noinst_LIBRARIES = librst.a + +librst_a_SOURCES = rst.cc + +mst_train_SOURCES = mst_train.cc +mst_train_LDADD = librst.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz + +AM_CPPFLAGS = -W -Wall -Wno-sign-compare $(GTEST_CPPFLAGS) -I$(top_srcdir)/decoder -I$(top_srcdir)/utils -I$(top_srcdir)/mteval -I../klm diff --git a/rst_parser/arc_factored.h b/rst_parser/arc_factored.h new file mode 100644 index 00000000..312d7d67 --- /dev/null +++ b/rst_parser/arc_factored.h @@ -0,0 +1,58 @@ +#ifndef _ARC_FACTORED_H_ +#define _ARC_FACTORED_H_ + +#include +#include +#include "array2d.h" +#include "sparse_vector.h" + +class ArcFactoredForest { + public: + explicit ArcFactoredForest(short num_words) : + num_words_(num_words), + root_edges_(num_words), + edges_(num_words, num_words) {} + + struct Edge { + Edge() : features(), edge_prob(prob_t::Zero()) {} + SparseVector features; + prob_t edge_prob; + }; + + template + void Reweight(const V& weights) { + for (int m = 0; m < num_words_; ++m) { + for (int h = 0; h < num_words_; ++h) { + if (h != m) { + Edge& e = edges_(h, m); + e.edge_prob.logeq(e.features.dot(weights)); + } + } + if (m) { + Edge& e = root_edges_[m]; + e.edge_prob.logeq(e.features.dot(weights)); + } + } + } + + const Edge& operator()(short h, short m) const { + assert(m > 0); + assert(m <= num_words_); + assert(h >= 0); + assert(h <= num_words_); + return h ? edges_(h - 1, m - 1) : root_edges[m - 1]; + } + Edge& operator()(short h, short m) { + assert(m > 0); + assert(m <= num_words_); + assert(h >= 0); + assert(h <= num_words_); + return h ? 
edges_(h - 1, m - 1) : root_edges[m - 1]; + } + private: + unsigned num_words_; + std::vector root_edges_; + Array2D edges_; +}; + +#endif diff --git a/rst_parser/mst_train.cc b/rst_parser/mst_train.cc new file mode 100644 index 00000000..1bceaff5 --- /dev/null +++ b/rst_parser/mst_train.cc @@ -0,0 +1,11 @@ +#include "arc_factored.h" + +#include <iostream> + +using namespace std; + +int main(int argc, char** argv) { + ArcFactoredForest af(5); + return 0; +} + diff --git a/rst_parser/rst.cc b/rst_parser/rst.cc new file mode 100644 index 00000000..0ab3e296 --- /dev/null +++ b/rst_parser/rst.cc @@ -0,0 +1,2 @@ +#include "rst.h" + diff --git a/rst_parser/rst.h b/rst_parser/rst.h new file mode 100644 index 00000000..30a1f8a4 --- /dev/null +++ b/rst_parser/rst.h @@ -0,0 +1,7 @@ +#ifndef _RST_H_ +#define _RST_H_ + +struct RandomSpanningTree { +}; + +#endif -- cgit v1.2.3 From 2bd8136c89cf39d828087d38034eef72190dda08 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Mon, 2 Apr 2012 23:49:30 -0400 Subject: make sure i don't break build by adding RST parser stubs --- rst_parser/Makefile.am | 5 +++- rst_parser/arc_factored.cc | 31 ++++++++++++++++++++ rst_parser/arc_factored.h | 72 ++++++++++++++++++++++++++++++++-------------- rst_parser/mst_train.cc | 1 + rst_parser/rst.cc | 5 ++++ rst_parser/rst.h | 5 +++- rst_parser/rst_test.cc | 33 +++++++++++++++++++++ 7 files changed, 129 insertions(+), 23 deletions(-) create mode 100644 rst_parser/arc_factored.cc create mode 100644 rst_parser/rst_test.cc diff --git a/rst_parser/Makefile.am b/rst_parser/Makefile.am index fef1c1a2..e97ab5c5 100644 --- a/rst_parser/Makefile.am +++ b/rst_parser/Makefile.am @@ -8,9 +8,12 @@ TESTS = rst_test noinst_LIBRARIES = librst.a -librst_a_SOURCES = rst.cc +librst_a_SOURCES = arc_factored.cc rst.cc mst_train_SOURCES = mst_train.cc mst_train_LDADD = librst.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz +rst_test_SOURCES = rst_test.cc +rst_test_LDADD = librst.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz + AM_CPPFLAGS = -W -Wall -Wno-sign-compare $(GTEST_CPPFLAGS) -I$(top_srcdir)/decoder -I$(top_srcdir)/utils -I$(top_srcdir)/mteval -I../klm diff --git a/rst_parser/arc_factored.cc b/rst_parser/arc_factored.cc new file mode 100644 index 00000000..1e75600b --- /dev/null +++ b/rst_parser/arc_factored.cc @@ -0,0 +1,31 @@ +#include "arc_factored.h" + +#include <set> + +#include <boost/pending/disjoint_sets.hpp> + +using namespace std; +using namespace boost; + +// based on Tarjan 1977 +void ArcFactoredForest::MaximumSpanningTree(SpanningTree* st) const { + typedef disjoint_sets_with_storage DisjointSet; + DisjointSet strongly(num_words_ + 1); + DisjointSet weakly(num_words_ + 1); + set<unsigned> roots, h, rset; + vector<pair<short,short> > enter(num_words_ + 1); + for (unsigned i = 0; i <= num_words_; ++i) { + strongly.make_set(i); + weakly.make_set(i); + roots.insert(i); + } + while(!roots.empty()) { + set<unsigned>::iterator it = roots.begin(); + const unsigned k = *it; + roots.erase(it); + cerr << "k=" << k << endl; + pair<short,short> ij; // TODO = Max(k); + } +} + diff --git a/rst_parser/arc_factored.h b/rst_parser/arc_factored.h index 312d7d67..e99be482 100644 --- a/rst_parser/arc_factored.h +++ b/rst_parser/arc_factored.h @@ -1,58 +1,88 @@ #ifndef _ARC_FACTORED_H_ #define _ARC_FACTORED_H_ -#include +#include #include +#include +#include #include "array2d.h" #include "sparse_vector.h" +#include "prob.h" +#include "weights.h" + 
+struct SpanningTree { + SpanningTree() : roots(1, -1) {} + std::vector roots; // unless multiroot trees are supported, this + // will have a single member + std::vector > h_m_pairs; +}; class ArcFactoredForest { public: explicit ArcFactoredForest(short num_words) : num_words_(num_words), root_edges_(num_words), - edges_(num_words, num_words) {} + edges_(num_words, num_words) { + for (int h = 0; h < num_words; ++h) { + for (int m = 0; m < num_words; ++m) { + edges_(h, m).h = h + 1; + edges_(h, m).m = m + 1; + } + root_edges_[h].h = 0; + root_edges_[h].m = h + 1; + } + } + + // compute the maximum spanning tree based on the current weighting + // using the O(n^2) CLE algorithm + void MaximumSpanningTree(SpanningTree* st) const; struct Edge { - Edge() : features(), edge_prob(prob_t::Zero()) {} + Edge() : h(), m(), features(), edge_prob(prob_t::Zero()) {} + short h; + short m; SparseVector features; prob_t edge_prob; }; - template - void Reweight(const V& weights) { - for (int m = 0; m < num_words_; ++m) { - for (int h = 0; h < num_words_; ++h) { - if (h != m) { - Edge& e = edges_(h, m); - e.edge_prob.logeq(e.features.dot(weights)); - } - } - if (m) { - Edge& e = root_edges_[m]; - e.edge_prob.logeq(e.features.dot(weights)); - } - } - } - const Edge& operator()(short h, short m) const { assert(m > 0); assert(m <= num_words_); assert(h >= 0); assert(h <= num_words_); - return h ? edges_(h - 1, m - 1) : root_edges[m - 1]; + return h ? edges_(h - 1, m - 1) : root_edges_[m - 1]; } + Edge& operator()(short h, short m) { assert(m > 0); assert(m <= num_words_); assert(h >= 0); assert(h <= num_words_); - return h ? edges_(h - 1, m - 1) : root_edges[m - 1]; + return h ? edges_(h - 1, m - 1) : root_edges_[m - 1]; + } + + template + void Reweight(const V& weights) { + for (int m = 0; m < num_words_; ++m) { + for (int h = 0; h < num_words_; ++h) { + if (h != m) { + Edge& e = edges_(h, m); + e.edge_prob.logeq(e.features.dot(weights)); + } + } + Edge& e = root_edges_[m]; + e.edge_prob.logeq(e.features.dot(weights)); + } } + private: unsigned num_words_; std::vector root_edges_; Array2D edges_; }; +inline std::ostream& operator<<(std::ostream& os, const ArcFactoredForest::Edge& edge) { + return os << "(" << edge.h << " < " << edge.m << ")"; +} + #endif diff --git a/rst_parser/mst_train.cc b/rst_parser/mst_train.cc index 1bceaff5..7b5af4c1 100644 --- a/rst_parser/mst_train.cc +++ b/rst_parser/mst_train.cc @@ -6,6 +6,7 @@ using namespace std; int main(int argc, char** argv) { ArcFactoredForest af(5); + cerr << af(0,3) << endl; return 0; } diff --git a/rst_parser/rst.cc b/rst_parser/rst.cc index 0ab3e296..f6b295b3 100644 --- a/rst_parser/rst.cc +++ b/rst_parser/rst.cc @@ -1,2 +1,7 @@ #include "rst.h" +using namespace std; + +StochasticForest::StochasticForest(const ArcFactoredForest& af) { +} + diff --git a/rst_parser/rst.h b/rst_parser/rst.h index 30a1f8a4..865871eb 100644 --- a/rst_parser/rst.h +++ b/rst_parser/rst.h @@ -1,7 +1,10 @@ #ifndef _RST_H_ #define _RST_H_ -struct RandomSpanningTree { +#include "arc_factored.h" + +struct StochasticForest { + explicit StochasticForest(const ArcFactoredForest& af); }; #endif diff --git a/rst_parser/rst_test.cc b/rst_parser/rst_test.cc new file mode 100644 index 00000000..e8fe706e --- /dev/null +++ b/rst_parser/rst_test.cc @@ -0,0 +1,33 @@ +#include "arc_factored.h" + +#include + +using namespace std; + +int main(int argc, char** argv) { + // John saw Mary + // (H -> M) + // (1 -> 2) 20 + // (1 -> 3) 3 + // (2 -> 1) 20 + // (2 -> 3) 30 + // (3 -> 2) 0 + // (3 -> 1) 11 + 
// (0, 2) 10 + // (0, 1) 9 + // (0, 3) 9 + ArcFactoredForest af(3); + af(1,2).edge_prob.logeq(20); + af(1,3).edge_prob.logeq(3); + af(2,1).edge_prob.logeq(20); + af(2,3).edge_prob.logeq(30); + af(3,2).edge_prob.logeq(0); + af(3,1).edge_prob.logeq(11); + af(0,2).edge_prob.logeq(10); + af(0,1).edge_prob.logeq(9); + af(0,3).edge_prob.logeq(9); + SpanningTree tree; + af.MaximumSpanningTree(&tree); + return 0; +} + -- cgit v1.2.3 From 6001b81eba37985d2e7dea6e6ebb488b787789a6 Mon Sep 17 00:00:00 2001 From: Chris Dyer Date: Tue, 3 Apr 2012 02:08:33 -0400 Subject: bayes lattice scoring --- decoder/hg_io.cc | 20 +++ decoder/hg_io.h | 1 + gi/pf/Makefile.am | 5 +- gi/pf/bayes_lattice_score.cc | 309 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 334 insertions(+), 1 deletion(-) create mode 100644 gi/pf/bayes_lattice_score.cc diff --git a/decoder/hg_io.cc b/decoder/hg_io.cc index 9f0f50fa..d416dbf6 100644 --- a/decoder/hg_io.cc +++ b/decoder/hg_io.cc @@ -401,6 +401,26 @@ string HypergraphIO::AsPLF(const Hypergraph& hg, bool include_global_parentheses return os.str(); } +string HypergraphIO::AsPLF(const Lattice& lat, bool include_global_parentheses) { + static bool first = true; + if (first) { InitEscapes(); first = false; } + if (lat.empty()) return "()"; + ostringstream os; + if (include_global_parentheses) os << '('; + static const string EPS="*EPS*"; + for (int i = 0; i < lat.size(); ++i) { + const vector arcs = lat[i]; + os << '('; + for (int j = 0; j < arcs.size(); ++j) { + os << "('" << Escape(TD::Convert(arcs[j].label)) << "'," + << arcs[j].cost << ',' << arcs[j].dist2next << "),"; + } + os << "),"; + } + if (include_global_parentheses) os << ')'; + return os.str(); +} + namespace PLF { const string chars = "'\\"; diff --git a/decoder/hg_io.h b/decoder/hg_io.h index 44817157..4e502a0c 100644 --- a/decoder/hg_io.h +++ b/decoder/hg_io.h @@ -30,6 +30,7 @@ struct HypergraphIO { static void ReadFromPLF(const std::string& in, Hypergraph* out, int line = 0); // return PLF string representation (undefined behavior on non-lattices) static std::string AsPLF(const Hypergraph& hg, bool include_global_parentheses = true); + static std::string AsPLF(const Lattice& lat, bool include_global_parentheses = true); static void PLFtoLattice(const std::string& plf, Lattice* pl); static std::string Escape(const std::string& s); // PLF helper }; diff --git a/gi/pf/Makefile.am b/gi/pf/Makefile.am index d365016b..86f8e07b 100644 --- a/gi/pf/Makefile.am +++ b/gi/pf/Makefile.am @@ -1,9 +1,12 @@ -bin_PROGRAMS = cbgi brat dpnaive pfbrat pfdist itg pfnaive condnaive align-lexonly-pyp learn_cfg pyp_lm nuisance_test align-tl pf_test +bin_PROGRAMS = cbgi brat dpnaive pfbrat pfdist itg pfnaive condnaive align-lexonly-pyp learn_cfg pyp_lm nuisance_test align-tl pf_test bayes_lattice_score noinst_LIBRARIES = libpf.a libpf_a_SOURCES = base_distributions.cc reachability.cc cfg_wfst_composer.cc corpus.cc unigrams.cc ngram_base.cc transliterations.cc backward.cc hpyp_tm.cc pyp_tm.cc +bayes_lattice_score_SOURCES = bayes_lattice_score.cc +bayes_lattice_score_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz + pf_test_SOURCES = pf_test.cc pf_test_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz diff --git a/gi/pf/bayes_lattice_score.cc 
b/gi/pf/bayes_lattice_score.cc new file mode 100644 index 00000000..70cb8dc2 --- /dev/null +++ b/gi/pf/bayes_lattice_score.cc @@ -0,0 +1,309 @@ +#include <iostream> +#include <queue> + +#include <boost/shared_ptr.hpp> +#include <boost/program_options.hpp> +#include <boost/program_options/variables_map.hpp> + +#include "inside_outside.h" +#include "hg.h" +#include "hg_io.h" +#include "bottom_up_parser.h" +#include "fdict.h" +#include "grammar.h" +#include "m.h" +#include "trule.h" +#include "tdict.h" +#include "filelib.h" +#include "dict.h" +#include "sampler.h" +#include "ccrp.h" +#include "ccrp_onetable.h" + +using namespace std; +using namespace tr1; +namespace po = boost::program_options; + +boost::shared_ptr<MT19937> prng; + +void InitCommandLine(int argc, char** argv, po::variables_map* conf) { + po::options_description opts("Configuration options"); + opts.add_options() + ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples") + ("input,i",po::value<string>(),"Read parallel data from") + ("random_seed,S",po::value<uint32_t>(), "Random seed"); + po::options_description clo("Command line options"); + clo.add_options() + ("config", po::value<string>(), "Configuration file") + ("help", "Print this help message and exit"); + po::options_description dconfig_options, dcmdline_options; + dconfig_options.add(opts); + dcmdline_options.add(opts).add(clo); + + po::store(parse_command_line(argc, argv, dcmdline_options), *conf); + if (conf->count("config")) { + ifstream config((*conf)["config"].as<string>().c_str()); + po::store(po::parse_config_file(config, dconfig_options), *conf); + } + po::notify(*conf); + + if (conf->count("help") || (conf->count("input") == 0)) { + cerr << dcmdline_options << endl; + exit(1); + } +} + +unsigned ReadCorpus(const string& filename, + vector<Lattice>* e, + set<WordID>* vocab_e) { + e->clear(); + vocab_e->clear(); + ReadFile rf(filename); + istream* in = rf.stream(); + assert(*in); + string line; + unsigned toks = 0; + while(*in) { + getline(*in, line); + if (line.empty() && !*in) break; + e->push_back(Lattice()); + Lattice& le = e->back(); + LatticeTools::ConvertTextOrPLF(line, & le); + for (unsigned i = 0; i < le.size(); ++i) + for (unsigned j = 0; j < le[i].size(); ++j) + vocab_e->insert(le[i][j].label); + toks += le.size(); + } + return toks; +} + +struct BaseModel { + explicit BaseModel(unsigned tc) : + unif(1.0 / tc), p(prob_t::One()) {} + prob_t prob(const TRule& r) const { + return unif; + } + void increment(const TRule& r, MT19937* rng) { + p *= prob(r); + } + void decrement(const TRule& r, MT19937* rng) { + p /= prob(r); + } + prob_t Likelihood() const { + return p; + } + const prob_t unif; + prob_t p; +}; + +struct UnigramModel { + explicit UnigramModel(unsigned tc) : base(tc), crp(1,1,1,1), glue(1,1,1,1) {} + BaseModel base; + CCRP<TRule> crp; + CCRP<TRule> glue; + + prob_t Prob(const TRule& r) const { + if (r.Arity() != 0) { + return glue.prob(r, prob_t(0.5)); + } + return crp.prob(r, base.prob(r)); + } + + int Increment(const TRule& r, MT19937* rng) { + if (r.Arity() != 0) { + glue.increment(r, 0.5, rng); + return 0; + } else { + if (crp.increment(r, base.prob(r), rng)) { + base.increment(r, rng); + return 1; + } + return 0; + } + } + + int Decrement(const TRule& r, MT19937* rng) { + if (r.Arity() != 0) { + glue.decrement(r, rng); + return 0; + } else { + if (crp.decrement(r, rng)) { + base.decrement(r, rng); + return -1; + } + return 0; + } + } + + prob_t Likelihood() const { + prob_t p; + p.logeq(crp.log_crp_prob() + glue.log_crp_prob()); + p *= base.Likelihood(); + return p; + } + + void ResampleHyperparameters(MT19937* rng) { + crp.resample_hyperparameters(rng); + glue.resample_hyperparameters(rng); + cerr << " d=" << crp.discount() << ", s=" << crp.strength() << "\t STOP d=" << glue.discount() << ", s=" << glue.strength() << endl; + } +}; + +UnigramModel* plm; + +void SampleDerivation(const Hypergraph& hg, MT19937* rng, vector<unsigned>* sampled_deriv) { + vector<prob_t> node_probs; + Inside<prob_t, EdgeProb>(hg, &node_probs); + queue<unsigned> q; + q.push(hg.nodes_.size() - 2); + while(!q.empty()) { + unsigned cur_node_id = q.front(); +// cerr << "NODE=" << cur_node_id << endl; + q.pop(); + const Hypergraph::Node& node = hg.nodes_[cur_node_id]; + const unsigned num_in_edges = node.in_edges_.size(); + unsigned sampled_edge = 0; + if (num_in_edges == 1) { + sampled_edge = node.in_edges_[0]; + } else { + //prob_t z; + assert(num_in_edges > 1); + SampleSet<prob_t> ss; + for (unsigned j = 0; j < num_in_edges; ++j) { + const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]]; + prob_t p = edge.edge_prob_; + for (unsigned k = 0; k < edge.tail_nodes_.size(); ++k) + p *= node_probs[edge.tail_nodes_[k]]; + ss.add(p); +// cerr << log(ss[j]) << " ||| " << edge.rule_->AsString() << endl; + //z += p; + } +// for (unsigned j = 0; j < num_in_edges; ++j) { +// const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]]; +// cerr << exp(log(ss[j] / z)) << " ||| " << edge.rule_->AsString() << endl; +// } +// cerr << " --- \n"; + sampled_edge = node.in_edges_[rng->SelectSample(ss)]; + } + sampled_deriv->push_back(sampled_edge); + const Hypergraph::Edge& edge = hg.edges_[sampled_edge]; + for (unsigned j = 0; j < edge.tail_nodes_.size(); ++j) { + q.push(edge.tail_nodes_[j]); + } + } +// for (unsigned i = 0; i < sampled_deriv->size(); ++i) { +// cerr << *hg.edges_[(*sampled_deriv)[i]].rule_ << endl; +// } +} + +void IncrementDerivation(const Hypergraph& hg, const vector<unsigned>& d, UnigramModel* plm, MT19937* rng) { + for (unsigned i = 0; i < d.size(); ++i) + plm->Increment(*hg.edges_[d[i]].rule_, rng); +} + +void DecrementDerivation(const Hypergraph& hg, const vector<unsigned>& d, UnigramModel* plm, MT19937* rng) { + for (unsigned i = 0; i < d.size(); ++i) + plm->Decrement(*hg.edges_[d[i]].rule_, rng); +} + +prob_t TotalProb(const Hypergraph& hg) { + return Inside<prob_t, EdgeProb>(hg); +} + +void IncrementLatticePath(const Hypergraph& hg, const vector<unsigned>& d, Lattice* pl) { + Lattice& lat = *pl; + for (int i = 0; i < d.size(); ++i) { + const Hypergraph::Edge& edge = hg.edges_[d[i]]; + if (edge.rule_->Arity() != 0) continue; + WordID sym = edge.rule_->e_[0]; + vector<LatticeArc>& las = lat[edge.i_]; + int dist = edge.j_ - edge.i_; + assert(dist > 0); + for (int j = 0; j < las.size(); ++j) { + if (las[j].dist2next == dist && + las[j].label == sym) { + las[j].cost += 1; + } + } + } +} + +int main(int argc, char** argv) { + po::variables_map conf; + + InitCommandLine(argc, argv, &conf); + vector<GrammarPtr> grammars(2); + grammars[0].reset(new GlueGrammar("S","X")); + const unsigned samples = conf["samples"].as<unsigned>(); + + if (conf.count("random_seed")) + prng.reset(new MT19937(conf["random_seed"].as<uint32_t>())); + else + prng.reset(new MT19937); + MT19937& rng = *prng; + vector<Lattice> corpuse; + set<WordID> vocabe; + cerr << "Reading corpus...\n"; + const unsigned toks = ReadCorpus(conf["input"].as<string>(), &corpuse, &vocabe); + cerr << "E-corpus size: " << corpuse.size() << " lattices\t (" << vocabe.size() << " word types)\n"; + UnigramModel lm(vocabe.size()); + vector<Hypergraph> hgs(corpuse.size()); + vector<vector<unsigned> > derivs(corpuse.size()); + for (int i = 0; i < corpuse.size(); ++i) { + grammars[1].reset(new PassThroughGrammar(corpuse[i], "X")); + ExhaustiveBottomUpParser parser("S", grammars); + bool res = parser.Parse(corpuse[i], &hgs[i]); // exhaustive parse + assert(res); + } + + double csamples = 0; + for (int SS=0; SS < samples; ++SS) { + const bool is_last = ((samples - 1) == SS); + prob_t dlh = prob_t::One(); + bool record_sample = (SS > (samples * 1 / 3) && (SS % 5 == 3)); + if (record_sample) csamples++; + for (int ci = 0; ci < corpuse.size(); ++ci) { + Lattice& lat = corpuse[ci]; + Hypergraph& hg = hgs[ci]; + vector<unsigned>& d = derivs[ci]; + if (!is_last) DecrementDerivation(hg, d, &lm, &rng); + for (unsigned i = 0; i < hg.edges_.size(); ++i) { + TRule& r = *hg.edges_[i].rule_; + if (r.Arity() != 0) + hg.edges_[i].edge_prob_ = prob_t::One(); + else + hg.edges_[i].edge_prob_ = lm.Prob(r); + } + if (!is_last) { + d.clear(); + SampleDerivation(hg, &rng, &d); + IncrementDerivation(hg, derivs[ci], &lm, &rng); + } else { + prob_t p = TotalProb(hg); + dlh *= p; + cerr << " p(sentence) = " << log(p) << "\t" << log(dlh) << endl; + } + if (record_sample) IncrementLatticePath(hg, derivs[ci], &lat); + } + double llh = log(lm.Likelihood()); + cerr << "LLH=" << llh << "\tENTROPY=" << (-llh / log(2) / toks) << "\tPPL=" << pow(2, -llh / log(2) / toks) << endl; + if (SS % 10 == 9) lm.ResampleHyperparameters(&rng); + if (is_last) { + double z = log(dlh); + cerr << "TOTAL_PROB=" << z << "\tENTROPY=" << (-z / log(2) / toks) << "\tPPL=" << pow(2, -z / log(2) / toks) << endl; + } + } + cerr << lm.crp << endl; + cerr << lm.glue << endl; + for (int i = 0; i < corpuse.size(); ++i) { + for (int j = 0; j < corpuse[i].size(); ++j) + for (int k = 0; k < corpuse[i][j].size(); ++k) { + corpuse[i][j][k].cost /= csamples; + corpuse[i][j][k].cost += 1e-3; + corpuse[i][j][k].cost = log(corpuse[i][j][k].cost); + } + cout << HypergraphIO::AsPLF(corpuse[i]) << endl; + } + return 0; +} + -- cgit v1.2.3
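
A closing note on the CRP objects these samplers lean on: UnigramModel above scores rules with CCRP<TRule>::prob(r, p0) against a uniform base. For a Pitman-Yor restaurant with discount d and strength s, the predictive probability such a call is commonly expected to return is (c_w - d*t_w + (s + d*T)*p0) / (n + s), where c_w and t_w are the customers and tables serving dish w, and n and T are their totals. The C++ sketch below illustrates only that formula, under that assumption; PYPSketch and everything in it are hypothetical illustration names, not the actual implementation in utils/ccrp.h, which also tracks per-table seating and samples (rather than being told) whether a new table is opened.

#include <iostream>
#include <map>

// Minimal sketch of the Pitman-Yor predictive probability assumed above.
// Not the real CCRP: seating arrangements are elided, and the caller
// decides table creation instead of sampling it.
struct PYPSketch {
  PYPSketch(double discount, double strength) :
      d(discount), s(strength), n(0), T(0) {}

  // predictive probability of a dish given its base probability p0:
  // (c_w - d*t_w + (s + d*T) * p0) / (n + s)
  double prob(int dish, double p0) const {
    if (n == 0) return p0;  // empty restaurant backs off to the base
    const double cw = Lookup(cust, dish);
    const double tw = Lookup(tab, dish);
    return (cw - d * tw + (s + d * T) * p0) / (n + s);
  }

  void increment(int dish, bool new_table) {
    ++cust[dish]; ++n;                    // seat one more customer
    if (new_table) { ++tab[dish]; ++T; }  // possibly at a fresh table
  }

  static double Lookup(const std::map<int, unsigned>& m, int k) {
    std::map<int, unsigned>::const_iterator it = m.find(k);
    return it == m.end() ? 0.0 : it->second;
  }

  double d, s;                        // discount and strength
  unsigned n, T;                      // total customers and total tables
  std::map<int, unsigned> cust, tab;  // per-dish customers and tables
};

int main() {
  PYPSketch crp(0.5, 1.0);       // arbitrary illustration hyperparameters
  const double p0 = 1.0 / 1000;  // e.g., a uniform base over 1000 rules
  std::cout << crp.prob(42, p0) << std::endl;  // 0.001: unseen dish, empty CRP
  crp.increment(42, true);
  std::cout << crp.prob(42, p0) << std::endl;  // ~0.251 after one observation
  return 0;
}

With d = 0 this collapses to the Dirichlet-process special case (c_w + s*p0)/(n + s); the resample_hyperparameters calls in the samplers above then amount to re-inferring d and s from the seating state rather than fixing them by hand.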