author    Chris Dyer <cdyer@cs.cmu.edu>  2012-03-04 14:33:11 -0500
committer Chris Dyer <cdyer@cs.cmu.edu>  2012-03-04 14:33:11 -0500
commit    3c918889d86fe1deaa5d26162bf85865f1aa33bd (patch)
tree      96da9dc29e54893428e401be3152b5a2eebcd742 /gi/pf
parent    29ae46010c3610dda877f2d1a07fe942f79bfc31 (diff)
clean up pyp lm code
Diffstat (limited to 'gi/pf')
-rw-r--r--  gi/pf/pyp_lm.cc  85
1 file changed, 60 insertions, 25 deletions
diff --git a/gi/pf/pyp_lm.cc b/gi/pf/pyp_lm.cc
index 0d85536c..88dfcc7c 100644
--- a/gi/pf/pyp_lm.cc
+++ b/gi/pf/pyp_lm.cc
@@ -11,7 +11,14 @@
#include "tdict.h"
#include "sampler.h"
#include "ccrp.h"
-#include "ccrp_onetable.h"
+
+// A not very memory-efficient implementation of an N-gram LM based on PYPs
// as described in Y.-W. Teh (2006), A Hierarchical Bayesian Language Model
+// based on Pitman-Yor Processes. In Proc. ACL.
+
// I use templates to handle the recursive formulation of the prior, so
+// the order of the model has to be specified here, at compile time:
+#define kORDER 3
using namespace std;
using namespace tr1;
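
As a minimal illustration of the compile-time recursion described in the comment above (independent of this file; the names are hypothetical): an order-N model holds an order-(N-1) backoff model as a member, with a <0> specialization terminating the chain, which is why the order must be fixed at compile time via a macro like kORDER rather than at runtime.

  #include <iostream>

  template <unsigned N> struct Model {
    Model<N - 1> backoff;                 // recursive member: lower-order model
    double prob() const { return 0.5 * backoff.prob(); }  // toy interpolation
  };

  template <> struct Model<0> {           // base case: uniform over 1000 types
    double prob() const { return 1.0 / 1000; }
  };

  int main() {
    Model<3> m;   // instantiates Model<2>, Model<1>, Model<0> at compile time
    std::cout << m.prob() << std::endl;   // 0.5^3 / 1000 = 0.000125
    return 0;
  }
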
@@ -22,8 +29,13 @@ shared_ptr<MT19937> prng;
void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
po::options_description opts("Configuration options");
opts.add_options()
- ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples")
- ("input,i",po::value<string>(),"Read data from")
+ ("samples,s",po::value<unsigned>()->default_value(300),"Number of samples")
+ ("train,i",po::value<string>(),"Training data file")
+ ("test,T",po::value<string>(),"Test data file")
+ ("discount_prior_a,a",po::value<double>()->default_value(1.0), "discount ~ Beta(a,b): a=this")
+ ("discount_prior_b,b",po::value<double>()->default_value(1.0), "discount ~ Beta(a,b): b=this")
+ ("strength_prior_s,s",po::value<double>()->default_value(1.0), "strength ~ Gamma(s,r): s=this")
+ ("strength_prior_r,r",po::value<double>()->default_value(1.0), "strength ~ Gamma(s,r): r=this")
("random_seed,S",po::value<uint32_t>(), "Random seed");
po::options_description clo("Command line options");
clo.add_options()
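
Assuming the built binary is called pyp_lm (a guess from the file name pyp_lm.cc), the options added above would be used like this, with Beta(a,b) hyperpriors on the discounts and Gamma(s,r) hyperpriors on the strengths:

  ./pyp_lm --train train.txt --test test.txt --samples 300 \
           --discount_prior_a 1.0 --discount_prior_b 1.0 \
           --strength_prior_s 1.0 --strength_prior_r 1.0
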
@@ -40,7 +52,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
}
po::notify(*conf);
- if (conf->count("help") || (conf->count("input") == 0)) {
+ if (conf->count("help") || (conf->count("train") == 0)) {
cerr << dcmdline_options << endl;
exit(1);
}
@@ -48,13 +60,13 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
template <unsigned N> struct PYPLM;
-// uniform base distribution
+// uniform base distribution (0-gram model)
template<> struct PYPLM<0> {
- PYPLM(unsigned vs) : p0(1.0 / vs), draws() {}
- void increment(WordID w, const vector<WordID>& context, MT19937* rng) { ++draws; }
- void decrement(WordID w, const vector<WordID>& context, MT19937* rng) { --draws; assert(draws >= 0); }
- double prob(WordID w, const vector<WordID>& context) const { return p0; }
- void resample_hyperparameters(MT19937* rng, const unsigned nloop, const unsigned niterations) {}
+ PYPLM(unsigned vs, double, double, double, double) : p0(1.0 / vs), draws() {}
+ void increment(WordID, const vector<WordID>&, MT19937*) { ++draws; }
+ void decrement(WordID, const vector<WordID>&, MT19937*) { --draws; assert(draws >= 0); }
+ double prob(WordID, const vector<WordID>&) const { return p0; }
+ void resample_hyperparameters(MT19937*, const unsigned, const unsigned) {}
double log_likelihood() const { return draws * log(p0); }
const double p0;
int draws;
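
The 0-gram base above assigns p0 = 1/|V| to every word, so its log likelihood is just draws * log(p0). For reference, the Pitman-Yor predictive rule that the CCRP objects at higher orders are expected to implement (Teh, 2006; the count and table arguments here are hypothetical accessors) interpolates observed counts with the backoff distribution:

  // p(w | u) = (c_uw - d * t_uw) / (theta + c_u)
  //          + (theta + d * t_u) / (theta + c_u) * p_backoff(w)
  // c_* are customer counts, t_* table counts, d the discount,
  // theta the strength (called alpha in this file).
  double pyp_prob(double c_uw, double t_uw, double c_u, double t_u,
                  double d, double theta, double backoff_p) {
    if (c_u == 0) return backoff_p;       // empty restaurant: pure backoff
    return (c_uw - d * t_uw) / (theta + c_u)
         + (theta + d * t_u) / (theta + c_u) * backoff_p;
  }
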
@@ -62,10 +74,13 @@ template<> struct PYPLM<0> {
// represents an N-gram LM
template <unsigned N> struct PYPLM {
- PYPLM(unsigned vs) : backoff(vs), d(0.8), alpha(1.0) {}
+ PYPLM(unsigned vs, double da, double db, double ss, double sr) :
+ backoff(vs, da, db, ss, sr),
+ discount_a(da), discount_b(db),
+ strength_s(ss), strength_r(sr),
+ d(0.8), alpha(1.0), lookup(N-1) {}
void increment(WordID w, const vector<WordID>& context, MT19937* rng) {
const double bo = backoff.prob(w, context);
- static vector<WordID> lookup(N-1);
for (unsigned i = 0; i < N-1; ++i)
lookup[i] = context[context.size() - 1 - i];
typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::iterator it = p.find(lookup);
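
The elided body of increment (between this hunk and the next) is what ties the levels of the hierarchy together: seating a customer at a new table in the order-N restaurant sends one customer down to the order-(N-1) restaurant. Schematically, assuming CCRP<WordID>::increment returns whether a new table was opened:

  // if (it->second.increment(w, bo, rng))  // true iff a new table opened
  //   backoff.increment(w, context, rng);  // propagate one level down
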
@@ -75,7 +90,6 @@ template <unsigned N> struct PYPLM {
backoff.increment(w, context, rng);
}
void decrement(WordID w, const vector<WordID>& context, MT19937* rng) {
- static vector<WordID> lookup(N-1);
for (unsigned i = 0; i < N-1; ++i)
lookup[i] = context[context.size() - 1 - i];
typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::iterator it = p.find(lookup);
@@ -85,7 +99,6 @@ template <unsigned N> struct PYPLM {
}
double prob(WordID w, const vector<WordID>& context) const {
const double bo = backoff.prob(w, context);
- static vector<WordID> lookup(N-1);
for (unsigned i = 0; i < N-1; ++i)
lookup[i] = context[context.size() - 1 - i];
typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::const_iterator it = p.find(lookup);
@@ -99,7 +112,9 @@ template <unsigned N> struct PYPLM {
double log_likelihood(const double& dd, const double& aa) const {
if (aa <= -dd) return -std::numeric_limits<double>::infinity();
- double llh = Md::log_beta_density(dd, 1, 1) + Md::log_gamma_density(aa, 1, 1);
+ //double llh = Md::log_beta_density(dd, 10, 3) + Md::log_gamma_density(aa, 1, 1);
+ double llh = Md::log_beta_density(dd, discount_a, discount_b) +
+ Md::log_gamma_density(aa, strength_s, strength_r);
typename unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > >::const_iterator it;
for (it = p.begin(); it != p.end(); ++it)
llh += it->second.log_crp_prob(dd, aa);
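
The Md:: helpers used above presumably compute the standard log densities; a self-contained sketch of what log_beta_density(x, a, b) and log_gamma_density(x, s, r) should return, with the rate (not scale) parameterization implied by Gamma(s,r):

  #include <cmath>
  double log_beta_density(double x, double a, double b) {
    // log Beta(x; a, b) = (a-1)log x + (b-1)log(1-x) - log B(a,b)
    return (a - 1) * std::log(x) + (b - 1) * std::log(1 - x)
           + std::lgamma(a + b) - std::lgamma(a) - std::lgamma(b);
  }
  double log_gamma_density(double x, double s, double r) {
    // log Gamma(x; shape s, rate r) = s log r + (s-1)log x - r x - log G(s)
    return s * std::log(r) + (s - 1) * std::log(x) - r * x - std::lgamma(s);
  }
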
@@ -143,7 +158,9 @@ template <unsigned N> struct PYPLM {
}
PYPLM<N-1> backoff;
+ double discount_a, discount_b, strength_s, strength_r;
double d, alpha;
+ mutable vector<WordID> lookup; // per-instance scratch buffer (replaces function-local statics)
unordered_map<vector<WordID>, CCRP<WordID>, boost::hash<vector<WordID> > > p;
};
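
Replacing the deleted function-local statics with this per-instance scratch vector avoids the buffer being shared across every call to the method while keeping the no-reallocation benefit, and mutable lets const methods like prob fill it. A minimal sketch of the same pattern (hypothetical names):

  #include <vector>
  struct Scorer {
    mutable std::vector<int> scratch;         // reused across const calls
    explicit Scorer(unsigned n) : scratch(n) {}
    double score(const std::vector<int>& ctx) const {
      // assumes ctx.size() >= scratch.size()
      for (unsigned i = 0; i < scratch.size(); ++i)
        scratch[i] = ctx[ctx.size() - 1 - i]; // reversed suffix, like lookup
      return 0.0;                             // placeholder
    }
  };
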
@@ -161,14 +178,21 @@ int main(int argc, char** argv) {
set<WordID> vocabe;
const WordID kEOS = TD::Convert("</s>");
cerr << "Reading corpus...\n";
- CorpusTools::ReadFromFile(conf["input"].as<string>(), &corpuse, &vocabe);
+ CorpusTools::ReadFromFile(conf["train"].as<string>(), &corpuse, &vocabe);
cerr << "E-corpus size: " << corpuse.size() << " sentences\t (" << vocabe.size() << " word types)\n";
-#define kORDER 3
- PYPLM<kORDER> lm(vocabe.size());
+ vector<vector<WordID> > test;
+ if (conf.count("test"))
+ CorpusTools::ReadFromFile(conf["test"].as<string>(), &test);
+ else
+ test = corpuse;
+ PYPLM<kORDER> lm(vocabe.size(),
+ conf["discount_prior_a"].as<double>(),
+ conf["discount_prior_b"].as<double>(),
+ conf["strength_prior_s"].as<double>(),
+ conf["strength_prior_r"].as<double>());
vector<WordID> ctx(kORDER - 1, TD::Convert("<s>"));
- int mci = corpuse.size() * 99 / 100;
for (int SS=0; SS < samples; ++SS) {
- for (int ci = 0; ci < mci; ++ci) {
+ for (int ci = 0; ci < corpuse.size(); ++ci) {
ctx.resize(kORDER - 1);
const vector<WordID>& s = corpuse[ci];
for (int i = 0; i <= s.size(); ++i) {
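
Between this hunk and the next, the elided training loop performs one collapsed Gibbs step per token; the usual pattern, consistent with the increment/decrement API above, is:

  // for each sample SS, each sentence s, each position i:
  //   WordID w = (i < s.size() ? s[i] : kEOS);
  //   if (SS > 0) lm.decrement(w, ctx, &rng);  // unseat previous assignment
  //   lm.increment(w, ctx, &rng);              // reseat under current state
  //   ctx.push_back(w);                        // extend the context window
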
@@ -187,22 +211,33 @@ int main(int argc, char** argv) {
}
double llh = 0;
unsigned cnt = 0;
- for (int ci = mci; ci < corpuse.size(); ++ci) {
+ unsigned oovs = 0;
+ for (int ci = 0; ci < test.size(); ++ci) {
ctx.resize(kORDER - 1);
- const vector<WordID>& s = corpuse[ci];
+ const vector<WordID>& s = test[ci];
for (int i = 0; i <= s.size(); ++i) {
WordID w = (i < s.size() ? s[i] : kEOS);
double lp = log(lm.prob(w, ctx)) / log(2);
- cerr << "p(" << TD::Convert(w) << " | " << TD::GetString(ctx) << ") = " << lp << endl;
+ if (i < s.size() && vocabe.count(w) == 0) {
+ cerr << "**OOV ";
+ ++oovs;
+ lp = 0;
+ }
+ cerr << "p(" << TD::Convert(w) << " |";
+ for (int j = ctx.size() + 1 - kORDER; j < ctx.size(); ++j)
+ cerr << ' ' << TD::Convert(ctx[j]);
+ cerr << ") = " << lp << endl;
ctx.push_back(w);
llh -= lp;
cnt++;
}
}
- cerr << " Log_10 prob: " << (llh * log(2) / log(10)) << endl;
- cerr << " Count: " << (cnt) << endl;
+ cerr << " Log_10 prob: " << (-llh * log(2) / log(10)) << endl;
+ cerr << " Count: " << cnt << endl;
+ cerr << " OOVs: " << oovs << endl;
cerr << "Cross-entropy: " << (llh / cnt) << endl;
cerr << " Perplexity: " << pow(2, llh / cnt) << endl;
return 0;
}
+
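
For reference, the quantities reported above are related as follows, with llh accumulated in bits (log base 2):

  // llh           = - sum_i log2 p(w_i)       total surprisal in bits
  // Log_10 prob   = -llh * log(2)/log(10)     the corpus log10 probability
  //                                           (hence the sign fix above)
  // Cross-entropy = llh / cnt                 bits per word
  // Perplexity    = 2^(llh / cnt)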