From 9aca7f30dda576a453eee64bb4ff0e8bd11a9b85 Mon Sep 17 00:00:00 2001
From: Chris Dyer
Date: Sun, 4 Mar 2012 14:33:11 -0500
Subject: clean up pyp lm code

---
 gi/pf/pyp_lm.cc | 85 ++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 60 insertions(+), 25 deletions(-)

diff --git a/gi/pf/pyp_lm.cc b/gi/pf/pyp_lm.cc
index 0d85536c..88dfcc7c 100644
--- a/gi/pf/pyp_lm.cc
+++ b/gi/pf/pyp_lm.cc
@@ -11,7 +11,14 @@
 #include "tdict.h"
 #include "sampler.h"
 #include "ccrp.h"
-#include "ccrp_onetable.h"
+
+// A not very memory-efficient implementation of an N-gram LM based on PYPs
+// as described in Y.-W. Teh. (2006) A Hierarchical Bayesian Language Model
+// based on Pitman-Yor Processes. In Proc. ACL.
+
+// I use templates to handle the recursive formulation of the prior, so
+// the order of the model has to be specified here, at compile time:
+#define kORDER 3
 
 using namespace std;
 using namespace tr1;
@@ -22,8 +29,13 @@ shared_ptr<MT19937> prng;
 void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   po::options_description opts("Configuration options");
   opts.add_options()
-        ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples")
-        ("input,i",po::value<string>(),"Read data from")
+        ("samples,s",po::value<unsigned>()->default_value(300),"Number of samples")
+        ("train,i",po::value<string>(),"Training data file")
+        ("test,T",po::value<string>(),"Test data file")
+        ("discount_prior_a,a",po::value<double>()->default_value(1.0), "discount ~ Beta(a,b): a=this")
+        ("discount_prior_b,b",po::value<double>()->default_value(1.0), "discount ~ Beta(a,b): b=this")
+        ("strength_prior_s,s",po::value<double>()->default_value(1.0), "strength ~ Gamma(s,r): s=this")
+        ("strength_prior_r,r",po::value<double>()->default_value(1.0), "strength ~ Gamma(s,r): r=this")
         ("random_seed,S",po::value<uint32_t>(), "Random seed");
   po::options_description clo("Command line options");
   clo.add_options()
@@ -40,7 +52,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   }
   po::notify(*conf);
 
-  if (conf->count("help") || (conf->count("input") == 0)) {
+  if (conf->count("help") || (conf->count("train") == 0)) {
     cerr << dcmdline_options << endl;
     exit(1);
   }
@@ -48,13 +60,13 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
 
 template <unsigned N> struct PYPLM;
 
-// uniform base distribution
+// uniform base distribution (0-gram model)
 template<> struct PYPLM<0> {
-  PYPLM(unsigned vs) : p0(1.0 / vs), draws() {}
-  void increment(WordID w, const vector<WordID>& context, MT19937* rng) { ++draws; }
-  void decrement(WordID w, const vector<WordID>& context, MT19937* rng) { --draws; assert(draws >= 0); }
-  double prob(WordID w, const vector<WordID>& context) const { return p0; }
-  void resample_hyperparameters(MT19937* rng, const unsigned nloop, const unsigned niterations) {}
+  PYPLM(unsigned vs, double, double, double, double) : p0(1.0 / vs), draws() {}
+  void increment(WordID, const vector<WordID>&, MT19937*) { ++draws; }
+  void decrement(WordID, const vector<WordID>&, MT19937*) { --draws; assert(draws >= 0); }
+  double prob(WordID, const vector<WordID>&) const { return p0; }
+  void resample_hyperparameters(MT19937*, const unsigned, const unsigned) {}
   double log_likelihood() const { return draws * log(p0); }
   const double p0;
   int draws;
@@ -62,10 +74,13 @@ template<> struct PYPLM<0> {
 
 // represents an N-gram LM
 template <unsigned N> struct PYPLM {
-  PYPLM(unsigned vs) : backoff(vs), d(0.8), alpha(1.0) {}
+  PYPLM(unsigned vs, double da, double db, double ss, double sr) :
+      backoff(vs, da, db, ss, sr),
+      discount_a(da), discount_b(db),
+      strength_s(ss), strength_r(sr),
+      d(0.8), alpha(1.0), lookup(N-1) {}
   void increment(WordID w, const vector<WordID>& context, MT19937* rng) {
     const double bo = backoff.prob(w, context);
-    static vector<WordID> lookup(N-1);
     for (unsigned i = 0; i < N-1; ++i)
       lookup[i] = context[context.size() - 1 - i];
     typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::iterator it = p.find(lookup);
@@ -75,7 +90,6 @@ template <unsigned N> struct PYPLM {
       backoff.increment(w, context, rng);
   }
   void decrement(WordID w, const vector<WordID>& context, MT19937* rng) {
-    static vector<WordID> lookup(N-1);
     for (unsigned i = 0; i < N-1; ++i)
       lookup[i] = context[context.size() - 1 - i];
     typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::iterator it = p.find(lookup);
@@ -85,7 +99,6 @@ template <unsigned N> struct PYPLM {
   }
   double prob(WordID w, const vector<WordID>& context) const {
     const double bo = backoff.prob(w, context);
-    static vector<WordID> lookup(N-1);
     for (unsigned i = 0; i < N-1; ++i)
       lookup[i] = context[context.size() - 1 - i];
     typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::const_iterator it = p.find(lookup);
@@ -99,7 +112,9 @@ template <unsigned N> struct PYPLM {
 
   double log_likelihood(const double& dd, const double& aa) const {
     if (aa <= -dd) return -std::numeric_limits<double>::infinity();
-    double llh = Md::log_beta_density(dd, 1, 1) + Md::log_gamma_density(aa, 1, 1);
+    //double llh = Md::log_beta_density(dd, 10, 3) + Md::log_gamma_density(aa, 1, 1);
+    double llh = Md::log_beta_density(dd, discount_a, discount_b) +
+                 Md::log_gamma_density(aa, strength_s, strength_r);
     typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::const_iterator it;
     for (it = p.begin(); it != p.end(); ++it)
       llh += it->second.log_crp_prob(dd, aa);
@@ -143,7 +158,9 @@ template <unsigned N> struct PYPLM {
   }
 
   PYPLM<N-1> backoff;
+  double discount_a, discount_b, strength_s, strength_r;
   double d, alpha;
+  mutable vector<WordID> lookup;  // thread-local
   unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > > p;
 };
 
@@ -161,14 +178,21 @@ int main(int argc, char** argv) {
   set<WordID> vocabe;
   const WordID kEOS = TD::Convert("</s>");
   cerr << "Reading corpus...\n";
-  CorpusTools::ReadFromFile(conf["input"].as<string>(), &corpuse, &vocabe);
+  CorpusTools::ReadFromFile(conf["train"].as<string>(), &corpuse, &vocabe);
   cerr << "E-corpus size: " << corpuse.size() << " sentences\t (" << vocabe.size() << " word types)\n";
-#define kORDER 3
-  PYPLM<kORDER> lm(vocabe.size());
+  vector<vector<WordID> > test;
+  if (conf.count("test"))
+    CorpusTools::ReadFromFile(conf["test"].as<string>(), &test);
+  else
+    test = corpuse;
+  PYPLM<kORDER> lm(vocabe.size(),
+                   conf["discount_prior_a"].as<double>(),
+                   conf["discount_prior_b"].as<double>(),
+                   conf["strength_prior_s"].as<double>(),
+                   conf["strength_prior_r"].as<double>());
   vector<WordID> ctx(kORDER - 1, TD::Convert("<s>"));
-  int mci = corpuse.size() * 99 / 100;
   for (int SS=0; SS < samples; ++SS) {
-    for (int ci = 0; ci < mci; ++ci) {
+    for (int ci = 0; ci < corpuse.size(); ++ci) {
       ctx.resize(kORDER - 1);
       const vector<WordID>& s = corpuse[ci];
       for (int i = 0; i <= s.size(); ++i) {
@@ -187,22 +211,33 @@ int main(int argc, char** argv) {
   }
   double llh = 0;
   unsigned cnt = 0;
-  for (int ci = mci; ci < corpuse.size(); ++ci) {
+  unsigned oovs = 0;
+  for (int ci = 0; ci < test.size(); ++ci) {
     ctx.resize(kORDER - 1);
-    const vector<WordID>& s = corpuse[ci];
+    const vector<WordID>& s = test[ci];
     for (int i = 0; i <= s.size(); ++i) {
       WordID w = (i < s.size() ? s[i] : kEOS);
      double lp = log(lm.prob(w, ctx)) / log(2);
-      cerr << "p(" << TD::Convert(w) << " | " << TD::GetString(ctx) << ") = " << lp << endl;
+      if (i < s.size() && vocabe.count(w) == 0) {
+        cerr << "**OOV ";
+        ++oovs;
+        lp = 0;
+      }
+      cerr << "p(" << TD::Convert(w) << " |";
+      for (int j = ctx.size() + 1 - kORDER; j < ctx.size(); ++j)
+        cerr << ' ' << TD::Convert(ctx[j]);
+      cerr << ") = " << lp << endl;
      ctx.push_back(w);
      llh -= lp;
      cnt++;
    }
  }
-  cerr << "  Log_10 prob: " << (llh * log(2) / log(10)) << endl;
-  cerr << "        Count: " << (cnt) << endl;
+  cerr << "  Log_10 prob: " << (-llh * log(2) / log(10)) << endl;
+  cerr << "        Count: " << cnt << endl;
+  cerr << "         OOVs: " << oovs << endl;
   cerr << "Cross-entropy: " << (llh / cnt) << endl;
   cerr << "   Perplexity: " << pow(2, llh / cnt) << endl;
 
   return 0;
 }
+
--
cgit v1.2.3
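
For context on what the CCRP<WordID> restaurants in this patch compute: in Teh's (2006) hierarchical Pitman-Yor LM, each context owns a Chinese restaurant process with discount d and strength alpha, and its predictive probability interpolates that context's counts with the (N-1)-gram backoff, which is exactly the role backoff.prob(w, context) plays in PYPLM<N>::prob. The standalone sketch below illustrates the predictive rule one such restaurant applies; PYPRestaurant and its fields are illustrative names, not cdec's actual CCRP API, and the seating counts are toy values.

// pyp_crp_sketch.cc -- illustrative only, not part of the commit above.
#include <iostream>
#include <map>

// One Pitman-Yor CRP node. In the patch, each context key of PYPLM<N>::p
// owns one such restaurant, and p0 comes from the (N-1)-gram backoff model.
struct PYPRestaurant {
  PYPRestaurant(double disc, double strength)
    : d(disc), alpha(strength), num_customers(0), num_tables(0) {}

  // Predictive probability of word w given backoff probability p0:
  //   ( c_w - d*t_w + (alpha + d*T) * p0 ) / ( c + alpha )
  // where c_w/t_w are customers/tables serving w, and c/T are their totals.
  double prob(int w, double p0) const {
    const double cw = Count(customers, w);
    const double tw = Count(tables, w);
    return (cw - d * tw + (alpha + d * num_tables) * p0) /
           (num_customers + alpha);
  }

  static double Count(const std::map<int, int>& m, int k) {
    std::map<int, int>::const_iterator it = m.find(k);
    return it == m.end() ? 0.0 : it->second;
  }

  double d, alpha;              // discount and strength hyperparameters
  std::map<int, int> customers; // word id -> customers eating that dish
  std::map<int, int> tables;    // word id -> tables serving that dish
  int num_customers, num_tables;
};

int main() {
  PYPRestaurant r(0.8, 1.0);             // the d=0.8, alpha=1.0 defaults above
  r.customers[3] = 2; r.tables[3] = 1;   // toy seating: word 3 seen twice
  r.num_customers = 2; r.num_tables = 1;
  const double p0 = 1.0 / 1000;          // uniform 0-gram base, |V| = 1000
  std::cout << r.prob(3, p0) << "\n";    // ~0.4006: boosted by its counts
  std::cout << r.prob(7, p0) << "\n";    // 0.0006: unseen, mass via backoff
  return 0;
}

With the patch's initial hyperparameters and a 1000-word uniform base, the seen word gets about 0.40 while an unseen word gets 0.0006; that leaked (alpha + d*T) mass is how the hierarchy smooths toward lower-order models, and it is the quantity whose hyperparameters the Beta(a,b) and Gamma(s,r) priors added by this commit govern.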