From 0e2d2d42c03cd37a56edd4fc5970c21c17c6c1ec Mon Sep 17 00:00:00 2001
From: graehl
Date: Tue, 6 Jul 2010 23:15:12 +0000
Subject: comment

git-svn-id: https://ws10smt.googlecode.com/svn/trunk@171 ec762483-ff6d-05da-a07a-a48fb63a330f
---
 decoder/ff.cc            |  2 +-
 decoder/ff_lm.cc         | 16 ++++++++++++++--
 decoder/hg.cc            |  2 +-
 decoder/inside_outside.h |  4 ++--
 4 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/decoder/ff.cc b/decoder/ff.cc
index 7186d776..261e9a17 100644
--- a/decoder/ff.cc
+++ b/decoder/ff.cc
@@ -93,7 +93,7 @@ ModelSet::ModelSet(const vector<double>& w, const vector<const FeatureFunction*
 }
 
 void ModelSet::AddFeaturesToEdge(const SentenceMetadata& smeta,
-                                 const Hypergraph& hg,
+                                 const Hypergraph& /* hg */,
                                  const vector<string>& node_states,
                                  Hypergraph::Edge* edge,
                                  string* context,
diff --git a/decoder/ff_lm.cc b/decoder/ff_lm.cc
index a12a2667..8333bf7b 100644
--- a/decoder/ff_lm.cc
+++ b/decoder/ff_lm.cc
@@ -4,6 +4,13 @@
 //NOTE: if ngram order is bigger than lm state's, then the longest possible ngram scores are still used. if you really want a lower order, a truncated copy of the LM should be small enough.  otherwise, an option to null out words outside of the order's window would need to be implemented.
 
+//#define UNIGRAM_DEBUG
+#ifdef UNIGRAM_DEBUG
+# define UNIDBG(x) do { cerr << x; } while(0)
+#else
+# define UNIDBG(x)
+#endif
+
 #include "ff_lm.h"
 
 #include
@@ -168,7 +175,7 @@ class LanguageModelImpl {
       kNONE(-1),
       kSTAR(TD::Convert("<{STAR}>"))
     , unigram(order<=1) {}
-
+//TODO: show that unigram special case (0 state) computes what it should.
   LanguageModelImpl(int order, const string& f) :
       ngram_(*TD::dict_, order), buffer_(), order_(order), state_size_(OrderToStateSize(order) - 1),
       floor_(-100.0),
@@ -300,14 +307,19 @@ class LanguageModelImpl {
   /// just how SRILM likes it: [rbegin,rend) is a phrase in reverse word order and null terminated so *rend=kNONE.  return unigram score for rend[-1] plus
   /// cost returned is some kind of log prob (who cares, we're just adding)
   double stateless_cost(WordID *rbegin,WordID *rend) {
+    UNIDBG("p(");
     double sum=0;
-    for (;rend>rbegin;--rend)
+    for (;rend>rbegin;--rend) {
       sum+=clamp(WordProb(rend[-1],rend));
+      UNIDBG(","<
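
For context, the ff_lm.cc hunks above add a compile-time UNIDBG trace macro and use it inside stateless_cost, which sums per-word log probabilities over a phrase stored in reverse word order and terminated by kNONE. Below is a minimal standalone sketch of that pattern, not cdec code: WordID, kNONE, kFloor, word_prob(), and convert() are hypothetical stand-ins for cdec's TD dictionary, the SRILM wrapper, and LanguageModelImpl's floor_.

// Standalone sketch of the UNIDBG tracing pattern added in the patch above.
// All names below are stand-ins, not the real cdec/SRILM API.
#include <algorithm>
#include <iostream>
#include <string>

#define UNIGRAM_DEBUG   // comment out to compile the traces away
#ifdef UNIGRAM_DEBUG
# define UNIDBG(x) do { std::cerr << x; } while(0)
#else
# define UNIDBG(x)
#endif

typedef int WordID;
const WordID kNONE = -1;      // sentinel terminating the reversed phrase
const double kFloor = -100.0; // same role as floor_ in LanguageModelImpl

// Stand-in for the LM lookup: log prob of w given the reversed, kNONE-terminated history.
double word_prob(WordID /*w*/, const WordID* /*history*/) { return -0.5; }

// Stand-in for TD::Convert (word id -> surface string).
std::string convert(WordID w) { return "w" + std::to_string(w); }

double clamp(double logp) { return std::max(logp, kFloor); }

// Mirrors stateless_cost: [rbegin,rend) is a phrase in reverse word order with
// *rend == kNONE; sum the clamped per-word log probs, tracing each scored word
// when UNIGRAM_DEBUG is defined.
double stateless_cost(WordID* rbegin, WordID* rend) {
  UNIDBG("p(");
  double sum = 0;
  for (; rend > rbegin; --rend) {
    sum += clamp(word_prob(rend[-1], rend));
    UNIDBG("," << convert(rend[-1]));
  }
  UNIDBG(")=" << sum << std::endl);
  return sum;
}

int main() {
  WordID rev[] = {3, 2, 1, kNONE};                         // three-word phrase in reverse, then the sentinel
  std::cout << stateless_cost(rev, rev + 3) << std::endl;  // -1.5 with the stub LM
}

The do { ... } while(0) wrapper keeps UNIDBG safe to use as a single statement inside unbraced if/else bodies, and the empty #else definition compiles the traces away entirely when UNIGRAM_DEBUG is not defined.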