64 files changed, 1287 insertions, 192 deletions
diff --git a/decoder/aligner.cc b/decoder/aligner.cc
index 53e059fb..232e022a 100644
--- a/decoder/aligner.cc
+++ b/decoder/aligner.cc
@@ -11,7 +11,7 @@
 #include "sentence_metadata.h"
 #include "inside_outside.h"
 #include "viterbi.h"
-#include "alignment_pharaoh.h"
+#include "alignment_io.h"

 using namespace std;

@@ -300,7 +300,7 @@ void AlignerTools::WriteAlignment(const Lattice& src_lattice,
       cerr << grid << endl;
     }
     (*out) << TD::GetString(src_sent) << " ||| " << TD::GetString(trg_sent) << " ||| ";
-    AlignmentPharaoh::SerializePharaohFormat(grid, out);
+    AlignmentIO::SerializePharaohFormat(grid, out);
   }
 };
diff --git a/decoder/decoder.cc b/decoder/decoder.cc
index 53c47d21..ec6f75f7 100644
--- a/decoder/decoder.cc
+++ b/decoder/decoder.cc
@@ -57,7 +57,6 @@ static const double kMINUS_EPSILON = -1e-6;  // don't be too strict

 using namespace std;
 using namespace std::tr1;
-using boost::shared_ptr;
 namespace po = boost::program_options;

 static bool verbose_feature_functions=true;
@@ -101,7 +100,7 @@ inline string str(char const* name,po::variables_map const& conf) {

 // print just the --long_opt names suitable for bash compgen
 inline void print_options(std::ostream &out,po::options_description const& opts) {
-  typedef std::vector< shared_ptr<po::option_description> > Ds;
+  typedef std::vector< boost::shared_ptr<po::option_description> > Ds;
   Ds const& ds=opts.options();
   out << '"';
   for (unsigned i=0;i<ds.size();++i) {
@@ -120,13 +119,13 @@ inline bool store_conf(po::variables_map const& conf,std::string const& name,V *
   return false;
 }

-inline shared_ptr<FeatureFunction> make_ff(string const& ffp,bool verbose_feature_functions,char const* pre="") {
+inline boost::shared_ptr<FeatureFunction> make_ff(string const& ffp,bool verbose_feature_functions,char const* pre="") {
   string ff, param;
   SplitCommandAndParam(ffp, &ff, &param);
   cerr << pre << "feature: " << ff;
   if (param.size() > 0) cerr << " (with config parameters '" << param << "')\n";
   else cerr << " (no config parameters)\n";
-  shared_ptr<FeatureFunction> pf = ff_registry.Create(ff, param);
+  boost::shared_ptr<FeatureFunction> pf = ff_registry.Create(ff, param);
   if (!pf) exit(1);
   int nbyte=pf->NumBytesContext();
   if (verbose_feature_functions)
@@ -135,13 +134,13 @@ inline shared_ptr<FeatureFunction> make_ff(string const& ffp,bool verbose_featur
 }

 #ifdef FSA_RESCORING
-inline shared_ptr<FsaFeatureFunction> make_fsa_ff(string const& ffp,bool verbose_feature_functions,char const* pre="") {
+inline boost::shared_ptr<FsaFeatureFunction> make_fsa_ff(string const& ffp,bool verbose_feature_functions,char const* pre="") {
   string ff, param;
   SplitCommandAndParam(ffp, &ff, &param);
   cerr << "FSA Feature: " << ff;
   if (param.size() > 0) cerr << " (with config parameters '" << param << "')\n";
   else cerr << " (no config parameters)\n";
-  shared_ptr<FsaFeatureFunction> pf = fsa_ff_registry.Create(ff, param);
+  boost::shared_ptr<FsaFeatureFunction> pf = fsa_ff_registry.Create(ff, param);
   if (!pf) exit(1);
   if (verbose_feature_functions)
     cerr<<"State is "<<pf->state_bytes()<<" bytes for "<<pre<<"feature "<<ffp<<endl;
@@ -156,10 +155,10 @@ inline shared_ptr<FsaFeatureFunction> make_fsa_ff(string const& ffp,bool verbose
 // passes are carried over into subsequent passes (where they may have different weights).
 struct RescoringPass {
   RescoringPass() : fid_summary(), density_prune(), beam_prune() {}
-  shared_ptr<ModelSet> models;
-  shared_ptr<IntersectionConfiguration> inter_conf;
+  boost::shared_ptr<ModelSet> models;
+  boost::shared_ptr<IntersectionConfiguration> inter_conf;
   vector<const FeatureFunction*> ffs;
-  shared_ptr<vector<weight_t> > weight_vector;
+  boost::shared_ptr<vector<weight_t> > weight_vector;
   int fid_summary;            // 0 == no summary feature
   double density_prune;       // 0 == don't density prune
   double beam_prune;          // 0 == don't beam prune
@@ -293,15 +292,15 @@ struct DecoderImpl {
   po::variables_map& conf;
   OracleBleu oracle;
   string formalism;
-  shared_ptr<Translator> translator;
-  shared_ptr<vector<weight_t> > init_weights; // weights used with initial parse
-  vector<shared_ptr<FeatureFunction> > pffs;
+  boost::shared_ptr<Translator> translator;
+  boost::shared_ptr<vector<weight_t> > init_weights; // weights used with initial parse
+  vector<boost::shared_ptr<FeatureFunction> > pffs;
 #ifdef FSA_RESCORING
   CFGOptions cfg_options;
-  vector<shared_ptr<FsaFeatureFunction> > fsa_ffs;
+  vector<boost::shared_ptr<FsaFeatureFunction> > fsa_ffs;
   vector<string> fsa_names;
 #endif
-  shared_ptr<RandomNumberGenerator<boost::mt19937> > rng;
+  boost::shared_ptr<RandomNumberGenerator<boost::mt19937> > rng;
   int sample_max_trans;
   bool aligner_mode;
   bool graphviz;
@@ -310,7 +309,7 @@ struct DecoderImpl {
   bool kbest;
   bool unique_kbest;
   bool get_oracle_forest;
-  shared_ptr<WriteFile> extract_file;
+  boost::shared_ptr<WriteFile> extract_file;
   int combine_size;
   int sent_id;
   SparseVector<prob_t> acc_vec;  // accumulate gradient
@@ -622,7 +621,7 @@ DecoderImpl::DecoderImpl(po::variables_map& conf, int argc, char** argv, istream
   }

   // set up weight vectors since later phases may reuse weights from earlier phases
-  shared_ptr<vector<weight_t> > prev_weights = init_weights;
+  boost::shared_ptr<vector<weight_t> > prev_weights = init_weights;
   for (int pass = 0; pass < rescoring_passes.size(); ++pass) {
     RescoringPass& rp = rescoring_passes[pass];
     if (!rp.weight_vector) {
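The churn above (and in many files below) replaces unqualified shared_ptr with boost::shared_ptr. decoder.cc has both "using namespace std::tr1;" and, before this patch, "using boost::shared_ptr;" in scope, so on toolchains where <tr1/memory> declares std::tr1::shared_ptr, unqualified lookup can find two different templates. A minimal standalone illustration of that failure mode (this example is mine, not part of the patch):

    // shared_ptr_ambiguity.cc -- illustrative only
    #include <tr1/memory>            // declares std::tr1::shared_ptr
    #include <boost/shared_ptr.hpp>  // declares boost::shared_ptr

    using namespace std::tr1;   // as in decoder.cc
    using boost::shared_ptr;    // the declaration this patch removes

    int main() {
      // shared_ptr<int> p(new int(42));  // would not compile here: unqualified
      //                                  // lookup finds both boost::shared_ptr
      //                                  // and std::tr1::shared_ptr
      boost::shared_ptr<int> q(new int(42));  // what the patch writes instead
      return (*q == 42) ? 0 : 1;
    }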
diff --git a/decoder/earley_composer.cc b/decoder/earley_composer.cc
index b7af801a..385baf8b 100644
--- a/decoder/earley_composer.cc
+++ b/decoder/earley_composer.cc
@@ -16,8 +16,6 @@
 #include "tdict.h"
 #include "hg.h"

-using boost::shared_ptr;
-namespace po = boost::program_options;
 using namespace std;
 using namespace std::tr1;

@@ -111,7 +109,7 @@ struct Edge {
   const Edge* const active_parent;    // back pointer, NULL for PREDICT items
   const Edge* const passive_parent;   // back pointer, NULL for SCAN and PREDICT items
   const TargetPhraseSet* const tps;   // translations
-  shared_ptr<SparseVector<double> > features; // features from CFG rule
+  boost::shared_ptr<SparseVector<double> > features; // features from CFG rule

   bool IsPassive() const {
     // when a rule is completed, this value will be set
diff --git a/decoder/ff_wordalign.cc b/decoder/ff_wordalign.cc
index 9e7c618e..decdf9bc 100644
--- a/decoder/ff_wordalign.cc
+++ b/decoder/ff_wordalign.cc
@@ -15,7 +15,6 @@
 #include "factored_lexicon_helper.h"
 #include "verbose.h"
-#include "alignment_pharaoh.h"
 #include "stringlib.h"
 #include "sentence_metadata.h"
 #include "hg.h"
diff --git a/decoder/grammar.cc b/decoder/grammar.cc
index 9e4065a6..714390f0 100644
--- a/decoder/grammar.cc
+++ b/decoder/grammar.cc
@@ -3,12 +3,14 @@
 #include <algorithm>
 #include <utility>
 #include <map>
+#include <tr1/unordered_map>

 #include "rule_lexer.h"
 #include "filelib.h"
 #include "tdict.h"

 using namespace std;
+using namespace std::tr1;

 const vector<TRulePtr> Grammar::NO_RULES;

@@ -148,24 +150,24 @@ bool GlueGrammar::HasRuleForSpan(int i, int /* j */, int /* distance */) const {
   return (i == 0);
 }

-PassThroughGrammar::PassThroughGrammar(const Lattice& input, const string& cat, const unsigned int ctf_level) :
-    has_rule_(input.size() + 1) {
+PassThroughGrammar::PassThroughGrammar(const Lattice& input, const string& cat, const unsigned int ctf_level) {
+  unordered_set<WordID> ss;
   for (int i = 0; i < input.size(); ++i) {
     const vector<LatticeArc>& alts = input[i];
     for (int k = 0; k < alts.size(); ++k) {
       const int j = alts[k].dist2next + i;
-      has_rule_[i].insert(j);
       const string& src = TD::Convert(alts[k].label);
-      TRulePtr pt(new TRule("[" + cat + "] ||| " + src + " ||| " + src + " ||| PassThrough=1"));
-      pt->a_.push_back(AlignmentPoint(0,0));
-      AddRule(pt);
-      RefineRule(pt, ctf_level);
+      if (ss.count(alts[k].label) == 0) {
+        TRulePtr pt(new TRule("[" + cat + "] ||| " + src + " ||| " + src + " ||| PassThrough=1"));
+        pt->a_.push_back(AlignmentPoint(0,0));
+        AddRule(pt);
+        RefineRule(pt, ctf_level);
+        ss.insert(alts[k].label);
+      }
     }
   }
 }

-bool PassThroughGrammar::HasRuleForSpan(int i, int j, int /* distance */) const {
-  const set<int>& hr = has_rule_[i];
-  if (i == j) { return !hr.empty(); }
-  return (hr.find(j) != hr.end());
+bool PassThroughGrammar::HasRuleForSpan(int, int, int distance) const {
+  return (distance < 2);
 }
diff --git a/decoder/grammar.h b/decoder/grammar.h
index f5d00817..e6a15a69 100644
--- a/decoder/grammar.h
+++ b/decoder/grammar.h
@@ -91,8 +91,6 @@ struct GlueGrammar : public TextGrammar {
 struct PassThroughGrammar : public TextGrammar {
   PassThroughGrammar(const Lattice& input, const std::string& cat, const unsigned int ctf_level=0);
   virtual bool HasRuleForSpan(int i, int j, int distance) const;
- private:
-  std::vector<std::set<int> > has_rule_;  // index by [i][j]
 };

 void RefineRule(TRulePtr pt, const unsigned int ctf_level);
diff --git a/decoder/hg_io.cc b/decoder/hg_io.cc
index 9f0f50fa..d416dbf6 100644
--- a/decoder/hg_io.cc
+++ b/decoder/hg_io.cc
@@ -401,6 +401,26 @@ string HypergraphIO::AsPLF(const Hypergraph& hg, bool include_global_parentheses
   return os.str();
 }

+string HypergraphIO::AsPLF(const Lattice& lat, bool include_global_parentheses) {
+  static bool first = true;
+  if (first) { InitEscapes(); first = false; }
+  if (lat.empty()) return "()";
+  ostringstream os;
+  if (include_global_parentheses) os << '(';
+  static const string EPS="*EPS*";
+  for (int i = 0; i < lat.size(); ++i) {
+    const vector<LatticeArc> arcs = lat[i];
+    os << '(';
+    for (int j = 0; j < arcs.size(); ++j) {
+      os << "('" << Escape(TD::Convert(arcs[j].label)) << "',"
+                 << arcs[j].cost << ',' << arcs[j].dist2next << "),";
+    }
+    os << "),";
+  }
+  if (include_global_parentheses) os << ')';
+  return os.str();
+}
+
 namespace PLF {

 const string chars = "'\\";
diff --git a/decoder/hg_io.h b/decoder/hg_io.h
index 44817157..4e502a0c 100644
--- a/decoder/hg_io.h
+++ b/decoder/hg_io.h
@@ -30,6 +30,7 @@ struct HypergraphIO {
   static void ReadFromPLF(const std::string& in, Hypergraph* out, int line = 0);
   // return PLF string representation (undefined behavior on non-lattices)
   static std::string AsPLF(const Hypergraph& hg, bool include_global_parentheses = true);
+  static std::string AsPLF(const Lattice& lat, bool include_global_parentheses = true);
   static void PLFtoLattice(const std::string& plf, Lattice* pl);
   static std::string Escape(const std::string& s);  // PLF helper
 };
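The new AsPLF(const Lattice&, bool) overload writes a lattice back out in Python Lattice Format; bayes_lattice_score.cc further below uses it to emit its rescored lattices. A hypothetical usage sketch (the three-arc lattice literal is invented; the LatticeArc(label, cost, dist2next) constructor is assumed from decoder/lattice.h):

    #include <iostream>
    #include "lattice.h"
    #include "hg_io.h"
    #include "tdict.h"

    int main() {
      Lattice lat(2);  // two positions
      lat[0].push_back(LatticeArc(TD::Convert("ein"), 0.5, 1));   // competing arcs
      lat[0].push_back(LatticeArc(TD::Convert("eine"), 0.5, 1));  // over position 0
      lat[1].push_back(LatticeArc(TD::Convert("haus"), 0.0, 1));
      // Per the serializer above, this prints something like:
      // ((('ein',0.5,1),('eine',0.5,1),),(('haus',0,1),),)
      std::cout << HypergraphIO::AsPLF(lat) << std::endl;
      return 0;
    }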
diff --git a/decoder/phrasetable_fst.cc b/decoder/phrasetable_fst.cc
index f421e941..b3bec86b 100644
--- a/decoder/phrasetable_fst.cc
+++ b/decoder/phrasetable_fst.cc
@@ -9,7 +9,6 @@
 #include "filelib.h"
 #include "tdict.h"

-using boost::shared_ptr;
 using namespace std;

 TargetPhraseSet::~TargetPhraseSet() {}
@@ -46,7 +45,7 @@ class TextFSTNode : public FSTNode {
   void ClearPassThroughTranslations();
  private:
   vector<WordID> passthroughs;
-  shared_ptr<TargetPhraseSet> data;
+  boost::shared_ptr<TargetPhraseSet> data;
   map<WordID, TextFSTNode> ptr;
 };
diff --git a/dpmert/ces.cc b/dpmert/ces.cc
index a85454da..c6cb1cdf 100644
--- a/dpmert/ces.cc
+++ b/dpmert/ces.cc
@@ -11,7 +11,6 @@
 #include "error_surface.h"
 #include "ns.h"

-using boost::shared_ptr;
 using namespace std;

 const bool minimize_segments = true;    // if adjacent segments have equal scores, merge them
@@ -22,7 +21,7 @@ void ComputeErrorSurface(const SegmentEvaluator& ss,
                          const EvaluationMetric* metric,
                          const Hypergraph& hg) {
   vector<WordID> prev_trans;
-  const vector<shared_ptr<MERTPoint> >& ienv = ve.GetSortedSegs();
+  const vector<boost::shared_ptr<MERTPoint> >& ienv = ve.GetSortedSegs();
   env->resize(ienv.size());
   SufficientStats prev_score; // defaults to 0
   int j = 0;
diff --git a/dpmert/lo_test.cc b/dpmert/lo_test.cc
index d9b909b8..5d90aabb 100644
--- a/dpmert/lo_test.cc
+++ b/dpmert/lo_test.cc
@@ -19,7 +19,6 @@
 #include "line_optimizer.h"

 using namespace std;
-using boost::shared_ptr;

 class OptTest : public testing::Test {
  protected:
@@ -44,12 +43,12 @@ TEST_F(OptTest, TestCheckNaN) {
 }

 TEST_F(OptTest,TestConvexHull) {
-  shared_ptr<MERTPoint> a1(new MERTPoint(-1, 0));
-  shared_ptr<MERTPoint> b1(new MERTPoint(1, 0));
-  shared_ptr<MERTPoint> a2(new MERTPoint(-1, 1));
-  shared_ptr<MERTPoint> b2(new MERTPoint(1, -1));
-  vector<shared_ptr<MERTPoint> > sa; sa.push_back(a1); sa.push_back(b1);
-  vector<shared_ptr<MERTPoint> > sb; sb.push_back(a2); sb.push_back(b2);
+  boost::shared_ptr<MERTPoint> a1(new MERTPoint(-1, 0));
+  boost::shared_ptr<MERTPoint> b1(new MERTPoint(1, 0));
+  boost::shared_ptr<MERTPoint> a2(new MERTPoint(-1, 1));
+  boost::shared_ptr<MERTPoint> b2(new MERTPoint(1, -1));
+  vector<boost::shared_ptr<MERTPoint> > sa; sa.push_back(a1); sa.push_back(b1);
+  vector<boost::shared_ptr<MERTPoint> > sb; sb.push_back(a2); sb.push_back(b2);
   ConvexHull a(sa);
   cerr << a << endl;
   ConvexHull b(sb);
diff --git a/dpmert/mert_geometry.cc b/dpmert/mert_geometry.cc
index 81b25af9..d6973658 100644
--- a/dpmert/mert_geometry.cc
+++ b/dpmert/mert_geometry.cc
@@ -4,13 +4,12 @@
 #include <limits>

 using namespace std;
-using boost::shared_ptr;

 ConvexHull::ConvexHull(int i) {
   if (i == 0) {
     // do nothing - <>
   } else if (i == 1) {
-    points.push_back(shared_ptr<MERTPoint>(new MERTPoint(0, 0, 0, shared_ptr<MERTPoint>(), shared_ptr<MERTPoint>())));
+    points.push_back(boost::shared_ptr<MERTPoint>(new MERTPoint(0, 0, 0, boost::shared_ptr<MERTPoint>(), boost::shared_ptr<MERTPoint>())));
     assert(this->IsMultiplicativeIdentity());
   } else {
     cerr << "Only can create ConvexHull semiring 0 and 1 with this constructor!\n";
@@ -27,7 +26,7 @@ const ConvexHull ConvexHullWeightFunction::operator()(const Hypergraph::Edge& e)

 ostream& operator<<(ostream& os, const ConvexHull& env) {
   os << '<';
-  const vector<shared_ptr<MERTPoint> >& points = env.GetSortedSegs();
+  const vector<boost::shared_ptr<MERTPoint> >& points = env.GetSortedSegs();
   for (int i = 0; i < points.size(); ++i)
     os << (i==0 ? "" : "|") << "x=" << points[i]->x << ",b=" << points[i]->b << ",m=" << points[i]->m << ",p1=" << points[i]->p1 << ",p2=" << points[i]->p2;
   return os << '>';
@@ -37,7 +36,7 @@ ostream& operator<<(ostream& os, const ConvexHull& env) {

 #ifdef ORIGINAL_MERT_IMPLEMENTATION
 struct SlopeCompare {
-  bool operator() (const shared_ptr<MERTPoint>& a, const shared_ptr<MERTPoint>& b) const {
+  bool operator() (const boost::shared_ptr<MERTPoint>& a, const boost::shared_ptr<MERTPoint>& b) const {
     return a->m < b->m;
   }
 };
@@ -93,7 +92,7 @@ const ConvexHull& ConvexHull::operator*=(const ConvexHull& other) {
   if (this->IsEdgeEnvelope()) {
 //    if (other.size() > 1)
 //      cerr << *this << " (TIMES) " << other << endl;
-    shared_ptr<MERTPoint> edge_parent = points[0];
+    boost::shared_ptr<MERTPoint> edge_parent = points[0];
     const double& edge_b = edge_parent->b;
     const double& edge_m = edge_parent->m;
     points.clear();
@@ -102,13 +101,13 @@ const ConvexHull& ConvexHull::operator*=(const ConvexHull& other) {
       const double m = p.m + edge_m;
       const double b = p.b + edge_b;
       const double& x = p.x;       // x's don't change with *
-      points.push_back(shared_ptr<MERTPoint>(new MERTPoint(x, m, b, edge_parent, other.points[i])));
+      points.push_back(boost::shared_ptr<MERTPoint>(new MERTPoint(x, m, b, edge_parent, other.points[i])));
       assert(points.back()->p1->edge);
     }
 //    if (other.size() > 1)
 //      cerr << " = " << *this << endl;
   } else {
-    vector<shared_ptr<MERTPoint> > new_points;
+    vector<boost::shared_ptr<MERTPoint> > new_points;
     int this_i = 0;
     int other_i = 0;
     const int this_size  = points.size();
@@ -124,7 +123,7 @@ const ConvexHull& ConvexHull::operator*=(const ConvexHull& other) {
       const double m = this_point.m + other_point.m;
       const double b = this_point.b + other_point.b;

-      new_points.push_back(shared_ptr<MERTPoint>(new MERTPoint(cur_x, m, b, points[this_i], other.points[other_i])));
+      new_points.push_back(boost::shared_ptr<MERTPoint>(new MERTPoint(cur_x, m, b, points[this_i], other.points[other_i])));
       int comp = 0;
       if (this_next_val < other_next_val) comp = -1; else
         if (this_next_val > other_next_val) comp = 1;
diff --git a/extools/extract.cc b/extools/extract.cc
index f6c121b4..49542fed 100644
--- a/extools/extract.cc
+++ b/extools/extract.cc
@@ -131,16 +131,16 @@ lookup_and_append(const map<K, V> &dict, const K &key, V &output)
 // phrases if there is more than one annotation.
 // TODO: support source annotation
 void Extract::AnnotatePhrasesWithCategoryTypes(const WordID default_cat,
-                                      const map< tuple<short,short,short,short>, vector<WordID> > &types,
+                                      const map< boost::tuple<short,short,short,short>, vector<WordID> > &types,
                                       vector<ParallelSpan>* phrases) {
   const int num_unannotated_phrases = phrases->size();
   // have to use num_unannotated_phrases since we may grow the vector
   for (int i = 0; i < num_unannotated_phrases; ++i) {
     ParallelSpan& phrase = (*phrases)[i];
     vector<WordID> cats;
-    lookup_and_append(types, make_tuple(phrase.i1, phrase.i2, phrase.j1, phrase.j2), cats);
-    lookup_and_append(types, make_tuple((short)-1, (short)-1, phrase.j1, phrase.j2), cats);
-    lookup_and_append(types, make_tuple(phrase.i1, phrase.i2, (short)-1, (short)-1), cats);
+    lookup_and_append(types, boost::make_tuple(phrase.i1, phrase.i2, phrase.j1, phrase.j2), cats);
+    lookup_and_append(types, boost::make_tuple((short)-1, (short)-1, phrase.j1, phrase.j2), cats);
+    lookup_and_append(types, boost::make_tuple(phrase.i1, phrase.i2, (short)-1, (short)-1), cats);
     if (cats.empty() && default_cat != 0) {
       cats = vector<WordID>(1, default_cat);
     }
diff --git a/extools/featurize_grammar.cc b/extools/featurize_grammar.cc
index ebae9fdc..78175202 100644
--- a/extools/featurize_grammar.cc
+++ b/extools/featurize_grammar.cc
@@ -136,8 +136,8 @@ class FERegistry {
  public:
   FERegistry() {}
   boost::shared_ptr<FeatureExtractor> Create(const std::string& ffname) const {
-    map<string, shared_ptr<FEFactoryBase> >::const_iterator it = reg_.find(ffname);
-    shared_ptr<FeatureExtractor> res;
+    map<string, boost::shared_ptr<FEFactoryBase> >::const_iterator it = reg_.find(ffname);
+    boost::shared_ptr<FeatureExtractor> res;
     if (it == reg_.end()) {
       cerr << "I don't know how to create feature " << ffname << endl;
     } else {
@@ -147,7 +147,7 @@ class FERegistry {
   }
   void DisplayList(ostream* out) const {
     bool first = true;
-    for (map<string, shared_ptr<FEFactoryBase> >::const_iterator it = reg_.begin();
+    for (map<string, boost::shared_ptr<FEFactoryBase> >::const_iterator it = reg_.begin();
         it != reg_.end(); ++it) {
       if (first) {first=false;} else {*out << ' ';}
       *out << it->first;
diff --git a/extools/sentence_pair.cc b/extools/sentence_pair.cc
index d5ebe48f..7d60715a 100644
--- a/extools/sentence_pair.cc
+++ b/extools/sentence_pair.cc
@@ -71,8 +71,7 @@ int AnnotatedParallelSentence::ReadAlignmentPoint(const char* buf,
     exit(1);
   }
   (*b) = 0;
-  //TODO: is this what is intended?  parses as A && B || C && D.
-  while(ch < end && (c == 0 && (!permit_col || (permit_col && buf[ch] != ':')) || c != 0 && buf[ch] != '-')) {
+  while((ch < end) && (c == 0 && (!permit_col || (permit_col && buf[ch] != ':')) || c != 0 && buf[ch] != '-')) {
     if ((buf[ch] < '0') || (buf[ch] > '9')) {
       cerr << "Alignment point badly formed 4: " << string(buf, start, end-start) << endl << buf << endl << buf[ch] << endl;
       exit(1);
@@ -151,7 +150,7 @@ void AnnotatedParallelSentence::ParseSpanLabel(const char* buf, int start, int e
     exit(1);
   }
   // cerr << a << " " << b << " " << string(buf,c,end-c) << endl;
-  span_types[make_tuple(a,b,c,d)].push_back(-TD::Convert(string(buf, ch, end-ch)));
+  span_types[boost::make_tuple(a,b,c,d)].push_back(-TD::Convert(string(buf, ch, end-ch)));
 }

 // INPUT FORMAT
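The surviving while-condition in ReadAlignmentPoint answers the deleted TODO: && binds tighter than ||, so it parses as (A && B) || (C && D), bounded by ch < end. An equivalent, more explicit restatement for illustration (the helper name is mine; note that "!permit_col || (permit_col && x)" simplifies to "!permit_col || x"):

    // Illustrative restatement of the loop condition, not part of the patch.
    static bool keep_scanning(int ch, int end, int c, bool permit_col, const char* buf) {
      const bool in_first_index  = (c == 0) && (!permit_col || buf[ch] != ':');
      const bool in_second_index = (c != 0) && (buf[ch] != '-');
      return (ch < end) && (in_first_index || in_second_index);
    }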
diff --git a/gi/pf/Makefile.am b/gi/pf/Makefile.am
index f9c979d0..86f8e07b 100644
--- a/gi/pf/Makefile.am
+++ b/gi/pf/Makefile.am
@@ -1,8 +1,14 @@
-bin_PROGRAMS = cbgi brat dpnaive pfbrat pfdist itg pfnaive condnaive align-lexonly-pyp learn_cfg pyp_lm nuisance_test align-tl
+bin_PROGRAMS = cbgi brat dpnaive pfbrat pfdist itg pfnaive condnaive align-lexonly-pyp learn_cfg pyp_lm nuisance_test align-tl pf_test bayes_lattice_score

 noinst_LIBRARIES = libpf.a
-libpf_a_SOURCES = base_distributions.cc reachability.cc cfg_wfst_composer.cc corpus.cc unigrams.cc ngram_base.cc transliterations.cc backward.cc pyp_word_model.cc pyp_tm.cc
+libpf_a_SOURCES = base_distributions.cc reachability.cc cfg_wfst_composer.cc corpus.cc unigrams.cc ngram_base.cc transliterations.cc backward.cc hpyp_tm.cc pyp_tm.cc
+
+bayes_lattice_score_SOURCES = bayes_lattice_score.cc
+bayes_lattice_score_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz
+
+pf_test_SOURCES = pf_test.cc
+pf_test_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz

 nuisance_test_SOURCES = nuisance_test.cc
 nuisance_test_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz
diff --git a/gi/pf/align-lexonly-pyp.cc b/gi/pf/align-lexonly-pyp.cc
index 942dcf51..e7509f57 100644
--- a/gi/pf/align-lexonly-pyp.cc
+++ b/gi/pf/align-lexonly-pyp.cc
@@ -11,6 +11,7 @@
 #include "sampler.h"
 #include "corpus.h"
 #include "pyp_tm.h"
+#include "hpyp_tm.h"
 #include "quasi_model2.h"

 using namespace std;
@@ -61,15 +62,17 @@ struct AlignedSentencePair {
   Array2D<short> posterior;
 };

+template <class LexicalTranslationModel>
 struct Aligner {
   Aligner(const vector<vector<WordID> >& lets,
+          int vocab_size,
           int num_letters,
           const po::variables_map& conf,
           vector<AlignedSentencePair>* c) :
       corpus(*c),
       paj_model(conf["align_alpha"].as<double>(), conf["p_null"].as<double>()),
       infer_paj(conf.count("infer_alignment_hyperparameters") > 0),
-      model(lets, num_letters),
+      model(lets, vocab_size, num_letters),
       kNULL(TD::Convert("NULL")) {
     assert(lets[kNULL].size() == 0);
   }
@@ -77,7 +80,7 @@
   vector<AlignedSentencePair>& corpus;
   QuasiModel2 paj_model;
   const bool infer_paj;
-  PYPLexicalTranslation model;
+  LexicalTranslationModel model;
   const WordID kNULL;

   void ResampleHyperparameters() {
@@ -217,7 +220,8 @@ int main(int argc, char** argv) {
   ExtractLetters(vocabf, &letters, NULL);
   letters[TD::Convert("NULL")].clear();

-  Aligner aligner(letters, letset.size(), conf, &corpus);
+  //Aligner<PYPLexicalTranslation> aligner(letters, vocabe.size(), letset.size(), conf, &corpus);
+  Aligner<HPYPLexicalTranslation> aligner(letters, vocabe.size(), letset.size(), conf, &corpus);
   aligner.InitializeRandom();

   const unsigned samples = conf["samples"].as<unsigned>();
diff --git a/gi/pf/align-tl.cc b/gi/pf/align-tl.cc
index cbe8c6c8..f6608f1d 100644
--- a/gi/pf/align-tl.cc
+++ b/gi/pf/align-tl.cc
@@ -58,7 +58,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   }
 }

-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;

 struct LexicalAlignment {
   unsigned char src_index;
diff --git a/gi/pf/bayes_lattice_score.cc b/gi/pf/bayes_lattice_score.cc
new file mode 100644
index 00000000..70cb8dc2
--- /dev/null
+++ b/gi/pf/bayes_lattice_score.cc
@@ -0,0 +1,309 @@
+#include <iostream>
+#include <queue>
+
+#include <boost/functional.hpp>
+#include <boost/program_options.hpp>
+#include <boost/program_options/variables_map.hpp>
+
+#include "inside_outside.h"
+#include "hg.h"
+#include "hg_io.h"
+#include "bottom_up_parser.h"
+#include "fdict.h"
+#include "grammar.h"
+#include "m.h"
+#include "trule.h"
+#include "tdict.h"
+#include "filelib.h"
+#include "dict.h"
+#include "sampler.h"
+#include "ccrp.h"
+#include "ccrp_onetable.h"
+
+using namespace std;
+using namespace tr1;
+namespace po = boost::program_options;
+
+boost::shared_ptr<MT19937> prng;
+
+void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
+  po::options_description opts("Configuration options");
+  opts.add_options()
+        ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples")
+        ("input,i",po::value<string>(),"Read parallel data from")
+        ("random_seed,S",po::value<uint32_t>(), "Random seed");
+  po::options_description clo("Command line options");
+  clo.add_options()
+        ("config", po::value<string>(), "Configuration file")
+        ("help", "Print this help message and exit");
+  po::options_description dconfig_options, dcmdline_options;
+  dconfig_options.add(opts);
+  dcmdline_options.add(opts).add(clo);
+
+  po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
+  if (conf->count("config")) {
+    ifstream config((*conf)["config"].as<string>().c_str());
+    po::store(po::parse_config_file(config, dconfig_options), *conf);
+  }
+  po::notify(*conf);
+
+  if (conf->count("help") || (conf->count("input") == 0)) {
+    cerr << dcmdline_options << endl;
+    exit(1);
+  }
+}
+
+unsigned ReadCorpus(const string& filename,
+                    vector<Lattice>* e,
+                    set<WordID>* vocab_e) {
+  e->clear();
+  vocab_e->clear();
+  ReadFile rf(filename);
+  istream* in = rf.stream();
+  assert(*in);
+  string line;
+  unsigned toks = 0;
+  while(*in) {
+    getline(*in, line);
+    if (line.empty() && !*in) break;
+    e->push_back(Lattice());
+    Lattice& le = e->back();
+    LatticeTools::ConvertTextOrPLF(line, & le);
+    for (unsigned i = 0; i < le.size(); ++i)
+      for (unsigned j = 0; j < le[i].size(); ++j)
+        vocab_e->insert(le[i][j].label);
+    toks += le.size();
+  }
+  return toks;
+}
+
+struct BaseModel {
+  explicit BaseModel(unsigned tc) :
+      unif(1.0 / tc), p(prob_t::One()) {}
+  prob_t prob(const TRule& r) const {
+    return unif;
+  }
+  void increment(const TRule& r, MT19937* rng) {
+    p *= prob(r);
+  }
+  void decrement(const TRule& r, MT19937* rng) {
+    p /= prob(r);
+  }
+  prob_t Likelihood() const {
+    return p;
+  }
+  const prob_t unif;
+  prob_t p;
+};
+
+struct UnigramModel {
+  explicit UnigramModel(unsigned tc) : base(tc), crp(1,1,1,1), glue(1,1,1,1) {}
+  BaseModel base;
+  CCRP<TRule> crp;
+  CCRP<TRule> glue;
+
+  prob_t Prob(const TRule& r) const {
+    if (r.Arity() != 0) {
+      return glue.prob(r, prob_t(0.5));
+    }
+    return crp.prob(r, base.prob(r));
+  }
+
+  int Increment(const TRule& r, MT19937* rng) {
+    if (r.Arity() != 0) {
+      glue.increment(r, 0.5, rng);
+      return 0;
+    } else {
+      if (crp.increment(r, base.prob(r), rng)) {
+        base.increment(r, rng);
+        return 1;
+      }
+      return 0;
+    }
+  }
+
+  int Decrement(const TRule& r, MT19937* rng) {
+    if (r.Arity() != 0) {
+      glue.decrement(r, rng);
+      return 0;
+    } else {
+      if (crp.decrement(r, rng)) {
+        base.decrement(r, rng);
+        return -1;
+      }
+      return 0;
+    }
+  }
+
+  prob_t Likelihood() const {
+    prob_t p;
+    p.logeq(crp.log_crp_prob() + glue.log_crp_prob());
+    p *= base.Likelihood();
+    return p;
+  }
+
+  void ResampleHyperparameters(MT19937* rng) {
+    crp.resample_hyperparameters(rng);
+    glue.resample_hyperparameters(rng);
+    cerr << " d=" << crp.discount() << ", s=" << crp.strength() << "\t STOP d=" << glue.discount() << ", s=" << glue.strength() << endl;
+  }
+};
+
+UnigramModel* plm;
+
+void SampleDerivation(const Hypergraph& hg, MT19937* rng, vector<unsigned>* sampled_deriv) {
+  vector<prob_t> node_probs;
+  Inside<prob_t, EdgeProb>(hg, &node_probs);
+  queue<unsigned> q;
+  q.push(hg.nodes_.size() - 2);
+  while(!q.empty()) {
+    unsigned cur_node_id = q.front();
+//    cerr << "NODE=" << cur_node_id << endl;
+    q.pop();
+    const Hypergraph::Node& node = hg.nodes_[cur_node_id];
+    const unsigned num_in_edges = node.in_edges_.size();
+    unsigned sampled_edge = 0;
+    if (num_in_edges == 1) {
+      sampled_edge = node.in_edges_[0];
+    } else {
+      //prob_t z;
+      assert(num_in_edges > 1);
+      SampleSet<prob_t> ss;
+      for (unsigned j = 0; j < num_in_edges; ++j) {
+        const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]];
+        prob_t p = edge.edge_prob_;
+        for (unsigned k = 0; k < edge.tail_nodes_.size(); ++k)
+          p *= node_probs[edge.tail_nodes_[k]];
+        ss.add(p);
+//        cerr << log(ss[j]) << " ||| " << edge.rule_->AsString() << endl;
+        //z += p;
+      }
+//      for (unsigned j = 0; j < num_in_edges; ++j) {
+//        const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]];
+//        cerr << exp(log(ss[j] / z)) << " ||| " << edge.rule_->AsString() << endl;
+//      }
+//      cerr << " --- \n";
+      sampled_edge = node.in_edges_[rng->SelectSample(ss)];
+    }
+    sampled_deriv->push_back(sampled_edge);
+    const Hypergraph::Edge& edge = hg.edges_[sampled_edge];
+    for (unsigned j = 0; j < edge.tail_nodes_.size(); ++j) {
+      q.push(edge.tail_nodes_[j]);
+    }
+  }
+//  for (unsigned i = 0; i < sampled_deriv->size(); ++i) {
+//    cerr << *hg.edges_[(*sampled_deriv)[i]].rule_ << endl;
+//  }
+}
+
+void IncrementDerivation(const Hypergraph& hg, const vector<unsigned>& d, UnigramModel* plm, MT19937* rng) {
+  for (unsigned i = 0; i < d.size(); ++i)
+    plm->Increment(*hg.edges_[d[i]].rule_, rng);
+}
+
+void DecrementDerivation(const Hypergraph& hg, const vector<unsigned>& d, UnigramModel* plm, MT19937* rng) {
+  for (unsigned i = 0; i < d.size(); ++i)
+    plm->Decrement(*hg.edges_[d[i]].rule_, rng);
+}
+
+prob_t TotalProb(const Hypergraph& hg) {
+  return Inside<prob_t, EdgeProb>(hg);
+}
+
+void IncrementLatticePath(const Hypergraph& hg, const vector<unsigned>& d, Lattice* pl) {
+  Lattice& lat = *pl;
+  for (int i = 0; i < d.size(); ++i) {
+    const Hypergraph::Edge& edge = hg.edges_[d[i]];
+    if (edge.rule_->Arity() != 0) continue;
+    WordID sym = edge.rule_->e_[0];
+    vector<LatticeArc>& las = lat[edge.i_];
+    int dist = edge.j_ - edge.i_;
+    assert(dist > 0);
+    for (int j = 0; j < las.size(); ++j) {
+      if (las[j].dist2next == dist &&
+          las[j].label == sym) {
+        las[j].cost += 1;
+      }
+    }
+  }
+}
+
+int main(int argc, char** argv) {
+  po::variables_map conf;
+
+  InitCommandLine(argc, argv, &conf);
+  vector<GrammarPtr> grammars(2);
+  grammars[0].reset(new GlueGrammar("S","X"));
+  const unsigned samples = conf["samples"].as<unsigned>();
+
+  if (conf.count("random_seed"))
+    prng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
+  else
+    prng.reset(new MT19937);
+  MT19937& rng = *prng;
+  vector<Lattice> corpuse;
+  set<WordID> vocabe;
+  cerr << "Reading corpus...\n";
+  const unsigned toks = ReadCorpus(conf["input"].as<string>(), &corpuse, &vocabe);
+  cerr << "E-corpus size: " << corpuse.size() << " lattices\t (" << vocabe.size() << " word types)\n";
+  UnigramModel lm(vocabe.size());
+  vector<Hypergraph> hgs(corpuse.size());
+  vector<vector<unsigned> > derivs(corpuse.size());
+  for (int i = 0; i < corpuse.size(); ++i) {
+    grammars[1].reset(new PassThroughGrammar(corpuse[i], "X"));
+    ExhaustiveBottomUpParser parser("S", grammars);
+    bool res = parser.Parse(corpuse[i], &hgs[i]);  // exhaustive parse
+    assert(res);
+  }
+
+  double csamples = 0;
+  for (int SS=0; SS < samples; ++SS) {
+    const bool is_last = ((samples - 1) == SS);
+    prob_t dlh = prob_t::One();
+    bool record_sample = (SS > (samples * 1 / 3) && (SS % 5 == 3));
+    if (record_sample) csamples++;
+    for (int ci = 0; ci < corpuse.size(); ++ci) {
+      Lattice& lat = corpuse[ci];
+      Hypergraph& hg = hgs[ci];
+      vector<unsigned>& d = derivs[ci];
+      if (!is_last) DecrementDerivation(hg, d, &lm, &rng);
+      for (unsigned i = 0; i < hg.edges_.size(); ++i) {
+        TRule& r = *hg.edges_[i].rule_;
+        if (r.Arity() != 0)
+          hg.edges_[i].edge_prob_ = prob_t::One();
+        else
+          hg.edges_[i].edge_prob_ = lm.Prob(r);
+      }
+      if (!is_last) {
+        d.clear();
+        SampleDerivation(hg, &rng, &d);
+        IncrementDerivation(hg, derivs[ci], &lm, &rng);
+      } else {
+        prob_t p = TotalProb(hg);
+        dlh *= p;
+        cerr << " p(sentence) = " << log(p) << "\t" << log(dlh) << endl;
+      }
+      if (record_sample) IncrementLatticePath(hg, derivs[ci], &lat);
+    }
+    double llh = log(lm.Likelihood());
+    cerr << "LLH=" << llh << "\tENTROPY=" << (-llh / log(2) / toks) << "\tPPL=" << pow(2, -llh / log(2) / toks) << endl;
+    if (SS % 10 == 9) lm.ResampleHyperparameters(&rng);
+    if (is_last) {
+      double z = log(dlh);
+      cerr << "TOTAL_PROB=" << z << "\tENTROPY=" << (-z / log(2) / toks) << "\tPPL=" << pow(2, -z / log(2) / toks) << endl;
+    }
+  }
+  cerr << lm.crp << endl;
+  cerr << lm.glue << endl;
+  for (int i = 0; i < corpuse.size(); ++i) {
+    for (int j = 0; j < corpuse[i].size(); ++j)
+      for (int k = 0; k < corpuse[i][j].size(); ++k) {
+        corpuse[i][j][k].cost /= csamples;
+        corpuse[i][j][k].cost += 1e-3;
+        corpuse[i][j][k].cost = log(corpuse[i][j][k].cost);
+      }
+    cout << HypergraphIO::AsPLF(corpuse[i]) << endl;
+  }
+  return 0;
+}
diff --git a/gi/pf/brat.cc b/gi/pf/brat.cc
index c2c52760..832f22cf 100644
--- a/gi/pf/brat.cc
+++ b/gi/pf/brat.cc
@@ -489,7 +489,7 @@ int main(int argc, char** argv) {
     cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n";
     return 1;
   }
-  shared_ptr<MT19937> prng;
+  boost::shared_ptr<MT19937> prng;
   if (conf.count("random_seed"))
     prng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
   else
diff --git a/gi/pf/cfg_wfst_composer.cc b/gi/pf/cfg_wfst_composer.cc
index a31b5be8..20520c81 100644
--- a/gi/pf/cfg_wfst_composer.cc
+++ b/gi/pf/cfg_wfst_composer.cc
@@ -16,7 +16,6 @@
 #include "tdict.h"
 #include "hg.h"

-using boost::shared_ptr;
 namespace po = boost::program_options;
 using namespace std;
 using namespace std::tr1;
@@ -114,7 +113,7 @@ struct Edge {
   const Edge* const active_parent;    // back pointer, NULL for PREDICT items
   const Edge* const passive_parent;   // back pointer, NULL for SCAN and PREDICT items
   TRulePtr tps;   // translations
-  shared_ptr<SparseVector<double> > features; // features from CFG rule
+  boost::shared_ptr<SparseVector<double> > features; // features from CFG rule

   bool IsPassive() const {
     // when a rule is completed, this value will be set
diff --git a/gi/pf/condnaive.cc b/gi/pf/condnaive.cc
index 3ea88016..419731ac 100644
--- a/gi/pf/condnaive.cc
+++ b/gi/pf/condnaive.cc
@@ -55,7 +55,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   }
 }

-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;

 struct ModelAndData {
   explicit ModelAndData(ConditionalParallelSegementationModel<PhraseConditionalBase>& m, const vector<vector<int> >& ce, const vector<vector<int> >& cf, const set<int>& ve, const set<int>& vf) :
diff --git a/gi/pf/dpnaive.cc b/gi/pf/dpnaive.cc
index 469dff5c..75ccad72 100644
--- a/gi/pf/dpnaive.cc
+++ b/gi/pf/dpnaive.cc
@@ -55,7 +55,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   }
 }

-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;

 template <typename Base>
 struct ModelAndData {
diff --git a/gi/pf/hpyp_tm.cc b/gi/pf/hpyp_tm.cc
new file mode 100644
index 00000000..784f9958
--- /dev/null
+++ b/gi/pf/hpyp_tm.cc
@@ -0,0 +1,133 @@
+#include "hpyp_tm.h"
+
+#include <tr1/unordered_map>
+#include <iostream>
+#include <queue>
+
+#include "tdict.h"
+#include "ccrp.h"
+#include "pyp_word_model.h"
+#include "tied_resampler.h"
+
+using namespace std;
+using namespace std::tr1;
+
+struct FreqBinner {
+  FreqBinner(const std::string& fname) { fd_.Load(fname); }
+  unsigned NumberOfBins() const { return fd_.Max() + 1; }
+  unsigned Bin(const WordID& w) const { return fd_.LookUp(w); }
+  FreqDict<unsigned> fd_;
+};
+
+template <typename Base, class Binner = FreqBinner>
+struct ConditionalPYPWordModel {
+  ConditionalPYPWordModel(Base* b, const Binner* bnr = NULL) :
+      base(*b),
+      binner(bnr),
+      btr(binner ? binner->NumberOfBins() + 1u : 2u) {}
+
+  void Summary() const {
+    cerr << "Number of conditioning contexts: " << r.size() << endl;
+    for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) {
+      cerr << TD::Convert(it->first) << "   \tPYP(d=" << it->second.discount() << ",s=" << it->second.strength() << ") --------------------------" << endl;
+      for (CCRP<vector<WordID> >::const_iterator i2 = it->second.begin(); i2 != it->second.end(); ++i2)
+        cerr << "   " << i2->second.total_dish_count_ << '\t' << TD::GetString(i2->first) << endl;
+    }
+  }
+
+  void ResampleHyperparameters(MT19937* rng) {
+    btr.ResampleHyperparameters(rng);
+  }
+
+  prob_t Prob(const WordID src, const vector<WordID>& trglets) const {
+    RuleModelHash::const_iterator it = r.find(src);
+    if (it == r.end()) {
+      return base(trglets);
+    } else {
+      return it->second.prob(trglets, base(trglets));
+    }
+  }
+
+  void Increment(const WordID src, const vector<WordID>& trglets, MT19937* rng) {
+    RuleModelHash::iterator it = r.find(src);
+    if (it == r.end()) {
+      it = r.insert(make_pair(src, CCRP<vector<WordID> >(0.5,1.0))).first;
+      static const WordID kNULL = TD::Convert("NULL");
+      unsigned bin = (src == kNULL ? 0 : 1);
+      if (binner && bin) { bin = binner->Bin(src) + 1; }
+      btr.Add(bin, &it->second);
+    }
+    if (it->second.increment(trglets, base(trglets), rng))
+      base.Increment(trglets, rng);
+  }
+
+  void Decrement(const WordID src, const vector<WordID>& trglets, MT19937* rng) {
+    RuleModelHash::iterator it = r.find(src);
+    assert(it != r.end());
+    if (it->second.decrement(trglets, rng)) {
+      base.Decrement(trglets, rng);
+    }
+  }
+
+  prob_t Likelihood() const {
+    prob_t p = prob_t::One();
+    for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) {
+      prob_t q; q.logeq(it->second.log_crp_prob());
+      p *= q;
+    }
+    return p;
+  }
+
+  unsigned UniqueConditioningContexts() const {
+    return r.size();
+  }
+
+  // TODO tie PYP hyperparameters based on source word frequency bins
+  Base& base;
+  const Binner* binner;
+  BinTiedResampler<CCRP<vector<WordID> > > btr;
+  typedef unordered_map<WordID, CCRP<vector<WordID> > > RuleModelHash;
+  RuleModelHash r;
+};
+
+HPYPLexicalTranslation::HPYPLexicalTranslation(const vector<vector<WordID> >& lets,
+                                               const unsigned vocab_size,
+                                               const unsigned num_letters) :
+    letters(lets),
+    base(vocab_size, num_letters, 5),
+    up0(new PYPWordModel<PoissonUniformWordModel>(&base)),
+    tmodel(new ConditionalPYPWordModel<PYPWordModel<PoissonUniformWordModel> >(up0, new FreqBinner("10k.freq"))),
+    kX(-TD::Convert("X")) {}
+
+void HPYPLexicalTranslation::Summary() const {
+  tmodel->Summary();
+  up0->Summary();
+}
+
+prob_t HPYPLexicalTranslation::Likelihood() const {
+  prob_t p = up0->Likelihood();
+  p *= tmodel->Likelihood();
+  return p;
+}
+
+void HPYPLexicalTranslation::ResampleHyperparameters(MT19937* rng) {
+  tmodel->ResampleHyperparameters(rng);
+  up0->ResampleHyperparameters(rng);
+}
+
+unsigned HPYPLexicalTranslation::UniqueConditioningContexts() const {
+  return tmodel->UniqueConditioningContexts();
+}
+
+prob_t HPYPLexicalTranslation::Prob(WordID src, WordID trg) const {
+  return tmodel->Prob(src, letters[trg]);
+}
+
+void HPYPLexicalTranslation::Increment(WordID src, WordID trg, MT19937* rng) {
+  tmodel->Increment(src, letters[trg], rng);
+}
+
+void HPYPLexicalTranslation::Decrement(WordID src, WordID trg, MT19937* rng) {
+  tmodel->Decrement(src, letters[trg], rng);
+}
diff --git a/gi/pf/hpyp_tm.h b/gi/pf/hpyp_tm.h
new file mode 100644
index 00000000..af3215ba
--- /dev/null
+++ b/gi/pf/hpyp_tm.h
@@ -0,0 +1,38 @@
+#ifndef HPYP_LEX_TRANS
+#define HPYP_LEX_TRANS
+
+#include <vector>
+#include "wordid.h"
+#include "prob.h"
+#include "sampler.h"
+#include "freqdict.h"
+#include "poisson_uniform_word_model.h"
+
+struct FreqBinner;
+template <class B> struct PYPWordModel;
+template <typename T, class B> struct ConditionalPYPWordModel;
+
+struct HPYPLexicalTranslation {
+  explicit HPYPLexicalTranslation(const std::vector<std::vector<WordID> >& lets,
+                                 const unsigned vocab_size,
+                                 const unsigned num_letters);
+
+  prob_t Likelihood() const;
+
+  void ResampleHyperparameters(MT19937* rng);
+  prob_t Prob(WordID src, WordID trg) const;  // return p(trg | src)
+  void Summary() const;
+  void Increment(WordID src, WordID trg, MT19937* rng);
+  void Decrement(WordID src, WordID trg, MT19937* rng);
+  unsigned UniqueConditioningContexts() const;
+
+ private:
+  const std::vector<std::vector<WordID> >& letters;   // spelling dictionary
+  PoissonUniformWordModel base;  // "generator" of English types
+  PYPWordModel<PoissonUniformWordModel>* up0;  // model English lexicon
+  ConditionalPYPWordModel<PYPWordModel<PoissonUniformWordModel>, FreqBinner>* tmodel;  // translation distributions
+                      // (model English word | French word)
+  const WordID kX;
+};
+
+#endif
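Taken together, hpyp_tm.{cc,h} stack three layers: a Poisson-uniform spelling model as the base, a single shared PYP over it for the English lexicon, and per-source-word PYPs (hyperparameters tied within source-frequency bins) for the translation distributions. The HPYPLexicalTranslation constructor above wires this up; schematically (notation mine, not from the patch):

    // base  : |w| ~ Poisson(5); each letter ~ Uniform(alphabet)    [PoissonUniformWordModel]
    // up0   : G_lex ~ PYP(d0, s0, base)                            [PYPWordModel<PoissonUniformWordModel>]
    // tmodel: G_f ~ PYP(d_f, s_f, G_lex) for each source word f    [ConditionalPYPWordModel<...>]
    // p(trg | src = f) = G_f(letters[trg])
    //
    // PYPLexicalTranslation (changed further below in pyp_tm.cc) is the flat
    // special case that conditions the per-word PYPs directly on the
    // Poisson-uniform base, skipping the shared G_lex layer.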
diff --git a/gi/pf/itg.cc b/gi/pf/itg.cc
index a38fe672..29ec3860 100644
--- a/gi/pf/itg.cc
+++ b/gi/pf/itg.cc
@@ -231,7 +231,7 @@ int main(int argc, char** argv) {
     cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n";
     return 1;
   }
-  shared_ptr<MT19937> prng;
+  boost::shared_ptr<MT19937> prng;
   if (conf.count("random_seed"))
     prng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
   else
diff --git a/gi/pf/learn_cfg.cc b/gi/pf/learn_cfg.cc
index ed1772bf..44eaa162 100644
--- a/gi/pf/learn_cfg.cc
+++ b/gi/pf/learn_cfg.cc
@@ -24,7 +24,7 @@ using namespace std;
 using namespace tr1;
 namespace po = boost::program_options;

-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;

 vector<int> nt_vocab;
 vector<int> nt_id_to_index;
 static unsigned kMAX_RULE_SIZE = 0;
diff --git a/gi/pf/mh_test.cc b/gi/pf/mh_test.cc
new file mode 100644
index 00000000..296e7285
--- /dev/null
+++ b/gi/pf/mh_test.cc
@@ -0,0 +1,148 @@
+#include "ccrp.h"
+
+#include <vector>
+#include <iostream>
+
+#include "tdict.h"
+#include "transliterations.h"
+
+using namespace std;
+
+MT19937 rng;
+
+static bool verbose = false;
+
+struct Model {
+
+  Model() : bp(), base(0.2, 0.6) , ccrps(5, CCRP<int>(0.8, 0.5)) {}
+
+  double p0(int x) const {
+    assert(x > 0);
+    assert(x < 5);
+    return 1.0/4.0;
+  }
+
+  double llh() const {
+    double lh = bp + base.log_crp_prob();
+    for (int ctx = 1; ctx < 5; ++ctx)
+      lh += ccrps[ctx].log_crp_prob();
+    return lh;
+  }
+
+  double prob(int ctx, int x) const {
+    assert(ctx > 0 && ctx < 5);
+    return ccrps[ctx].prob(x, base.prob(x, p0(x)));
+  }
+
+  void increment(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    if (ccrps[ctx].increment(x, base.prob(x, p0(x)), &rng)) {
+      if (base.increment(x, p0(x), &rng)) {
+        bp += log(1.0 / 4.0);
+      }
+    }
+  }
+
+  // this is just a biased estimate
+  double est_base_prob(int x) {
+    return (x + 1) * x / 40.0;
+  }
+
+  void increment_is(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    SampleSet<double> ss;
+    const int PARTICLES = 25;
+    vector<CCRP<int> > s1s(PARTICLES, CCRP<int>(0.5,0.5));
+    vector<CCRP<int> > sbs(PARTICLES, CCRP<int>(0.5,0.5));
+    vector<double> sp0s(PARTICLES);
+
+    CCRP<int> s1 = ccrps[ctx];
+    CCRP<int> sb = base;
+    double sp0 = bp;
+    for (int pp = 0; pp < PARTICLES; ++pp) {
+      if (pp > 0) {
+        ccrps[ctx] = s1;
+        base = sb;
+        bp = sp0;
+      }
+
+      double q = 1;
+      double gamma = 1;
+      double est_p = est_base_prob(x);
+      //base.prob(x, p0(x)) + rng.next() * 0.1;
+      if (ccrps[ctx].increment(x, est_p, &rng, &q)) {
+        gamma = q * base.prob(x, p0(x));
+        q *= est_p;
+        if (verbose) cerr << "(DP-base draw) ";
+        double qq = -1;
+        if (base.increment(x, p0(x), &rng, &qq)) {
+          if (verbose) cerr << "(G0 draw) ";
+          bp += log(p0(x));
+          qq *= p0(x);
+        }
+      } else { gamma = q; }
+      double w = gamma / q;
+      if (verbose)
+        cerr << "gamma=" << gamma << " q=" << q << "\tw=" << w << endl;
+      ss.add(w);
+      s1s[pp] = ccrps[ctx];
+      sbs[pp] = base;
+      sp0s[pp] = bp;
+    }
+    int ps = rng.SelectSample(ss);
+    ccrps[ctx] = s1s[ps];
+    base = sbs[ps];
+    bp = sp0s[ps];
+    if (verbose) {
+      cerr << "SELECTED: " << ps << endl;
+      static int cc = 0; cc++; if (cc ==10) exit(1);
+    }
+  }
+
+  void decrement(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    if (ccrps[ctx].decrement(x, &rng)) {
+      if (base.decrement(x, &rng)) {
+        bp -= log(p0(x));
+      }
+    }
+  }
+
+  double bp;
+  CCRP<int> base;
+  vector<CCRP<int> > ccrps;
+
+};
+
+int main(int argc, char** argv) {
+  if (argc > 1) { verbose = true; }
+  vector<int> counts(15, 0);
+  vector<int> tcounts(15, 0);
+  int points[] = {1,2, 2,2, 3,2, 4,1, 3, 4, 3, 3, 2, 3, 4, 1, 4, 1, 3, 2, 1, 3, 1, 4, 0, 0};
+  double tlh = 0;
+  double tt = 0;
+  for (int n = 0; n < 1000; ++n) {
+    if (n % 10 == 0) cerr << '.';
+    if ((n+1) % 400 == 0) cerr << " [" << (n+1) << "]\n";
+    Model m;
+    for (int *x = points; *x; x += 2)
+      m.increment(x[0], x[1]);
+
+    for (int j = 0; j < 24; ++j) {
+      for (int *x = points; *x; x += 2) {
+        if (rng.next() < 0.8) {
+          m.decrement(x[0], x[1]);
+          m.increment_is(x[0], x[1]);
+        }
+      }
+    }
+    counts[m.base.num_customers()]++;
+    tcounts[m.base.num_tables()]++;
+    tlh += m.llh();
+    tt += 1.0;
+  }
+  cerr << "mean LLH = " << (tlh / tt) << endl;
+  for (int i = 0; i < 15; ++i)
+    cerr << i << ": " << (counts[i] / tt) << "\t" << (tcounts[i] / tt) << endl;
+}
diff --git a/gi/pf/pf_test.cc b/gi/pf/pf_test.cc
new file mode 100644
index 00000000..296e7285
--- /dev/null
+++ b/gi/pf/pf_test.cc
@@ -0,0 +1,148 @@
+#include "ccrp.h"
+
+#include <vector>
+#include <iostream>
+
+#include "tdict.h"
+#include "transliterations.h"
+
+using namespace std;
+
+MT19937 rng;
+
+static bool verbose = false;
+
+struct Model {
+
+  Model() : bp(), base(0.2, 0.6) , ccrps(5, CCRP<int>(0.8, 0.5)) {}
+
+  double p0(int x) const {
+    assert(x > 0);
+    assert(x < 5);
+    return 1.0/4.0;
+  }
+
+  double llh() const {
+    double lh = bp + base.log_crp_prob();
+    for (int ctx = 1; ctx < 5; ++ctx)
+      lh += ccrps[ctx].log_crp_prob();
+    return lh;
+  }
+
+  double prob(int ctx, int x) const {
+    assert(ctx > 0 && ctx < 5);
+    return ccrps[ctx].prob(x, base.prob(x, p0(x)));
+  }
+
+  void increment(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    if (ccrps[ctx].increment(x, base.prob(x, p0(x)), &rng)) {
+      if (base.increment(x, p0(x), &rng)) {
+        bp += log(1.0 / 4.0);
+      }
+    }
+  }
+
+  // this is just a biased estimate
+  double est_base_prob(int x) {
+    return (x + 1) * x / 40.0;
+  }
+
+  void increment_is(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    SampleSet<double> ss;
+    const int PARTICLES = 25;
+    vector<CCRP<int> > s1s(PARTICLES, CCRP<int>(0.5,0.5));
+    vector<CCRP<int> > sbs(PARTICLES, CCRP<int>(0.5,0.5));
+    vector<double> sp0s(PARTICLES);
+
+    CCRP<int> s1 = ccrps[ctx];
+    CCRP<int> sb = base;
+    double sp0 = bp;
+    for (int pp = 0; pp < PARTICLES; ++pp) {
+      if (pp > 0) {
+        ccrps[ctx] = s1;
+        base = sb;
+        bp = sp0;
+      }
+
+      double q = 1;
+      double gamma = 1;
+      double est_p = est_base_prob(x);
+      //base.prob(x, p0(x)) + rng.next() * 0.1;
+      if (ccrps[ctx].increment(x, est_p, &rng, &q)) {
+        gamma = q * base.prob(x, p0(x));
+        q *= est_p;
+        if (verbose) cerr << "(DP-base draw) ";
+        double qq = -1;
+        if (base.increment(x, p0(x), &rng, &qq)) {
+          if (verbose) cerr << "(G0 draw) ";
+          bp += log(p0(x));
+          qq *= p0(x);
+        }
+      } else { gamma = q; }
+      double w = gamma / q;
+      if (verbose)
+        cerr << "gamma=" << gamma << " q=" << q << "\tw=" << w << endl;
+      ss.add(w);
+      s1s[pp] = ccrps[ctx];
+      sbs[pp] = base;
+      sp0s[pp] = bp;
+    }
+    int ps = rng.SelectSample(ss);
+    ccrps[ctx] = s1s[ps];
+    base = sbs[ps];
+    bp = sp0s[ps];
+    if (verbose) {
+      cerr << "SELECTED: " << ps << endl;
+      static int cc = 0; cc++; if (cc ==10) exit(1);
+    }
+  }
+
+  void decrement(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    if (ccrps[ctx].decrement(x, &rng)) {
+      if (base.decrement(x, &rng)) {
+        bp -= log(p0(x));
+      }
+    }
+  }
+
+  double bp;
+  CCRP<int> base;
+  vector<CCRP<int> > ccrps;
+
+};
+
+int main(int argc, char** argv) {
+  if (argc > 1) { verbose = true; }
+  vector<int> counts(15, 0);
+  vector<int> tcounts(15, 0);
+  int points[] = {1,2, 2,2, 3,2, 4,1, 3, 4, 3, 3, 2, 3, 4, 1, 4, 1, 3, 2, 1, 3, 1, 4, 0, 0};
+  double tlh = 0;
+  double tt = 0;
+  for (int n = 0; n < 1000; ++n) {
+    if (n % 10 == 0) cerr << '.';
+    if ((n+1) % 400 == 0) cerr << " [" << (n+1) << "]\n";
+    Model m;
+    for (int *x = points; *x; x += 2)
+      m.increment(x[0], x[1]);
+
+    for (int j = 0; j < 24; ++j) {
+      for (int *x = points; *x; x += 2) {
+        if (rng.next() < 0.8) {
+          m.decrement(x[0], x[1]);
+          m.increment_is(x[0], x[1]);
+        }
+      }
+    }
+    counts[m.base.num_customers()]++;
+    tcounts[m.base.num_tables()]++;
+    tlh += m.llh();
+    tt += 1.0;
+  }
+  cerr << "mean LLH = " << (tlh / tt) << endl;
+  for (int i = 0; i < 15; ++i)
+    cerr << i << ": " << (counts[i] / tt) << "\t" << (tcounts[i] / tt) << endl;
+}
diff --git a/gi/pf/pfbrat.cc b/gi/pf/pfbrat.cc
index c2c52760..832f22cf 100644
--- a/gi/pf/pfbrat.cc
+++ b/gi/pf/pfbrat.cc
@@ -489,7 +489,7 @@ int main(int argc, char** argv) {
     cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n";
     return 1;
   }
-  shared_ptr<MT19937> prng;
+  boost::shared_ptr<MT19937> prng;
   if (conf.count("random_seed"))
     prng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
   else
diff --git a/gi/pf/pfdist.cc b/gi/pf/pfdist.cc
index 3d578db2..a3e46064 100644
--- a/gi/pf/pfdist.cc
+++ b/gi/pf/pfdist.cc
@@ -23,7 +23,7 @@ using namespace std;
 using namespace tr1;
 namespace po = boost::program_options;

-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;

 void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   po::options_description opts("Configuration options");
diff --git a/gi/pf/pfnaive.cc b/gi/pf/pfnaive.cc
index e1a53f5c..958ec4e2 100644
--- a/gi/pf/pfnaive.cc
+++ b/gi/pf/pfnaive.cc
@@ -25,7 +25,7 @@ using namespace std;
 using namespace tr1;
 namespace po = boost::program_options;

-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;

 void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   po::options_description opts("Configuration options");
diff --git a/gi/pf/poisson_uniform_word_model.h b/gi/pf/poisson_uniform_word_model.h
new file mode 100644
index 00000000..76204a0e
--- /dev/null
+++ b/gi/pf/poisson_uniform_word_model.h
@@ -0,0 +1,50 @@
+#ifndef _POISSON_UNIFORM_WORD_MODEL_H_
+#define _POISSON_UNIFORM_WORD_MODEL_H_
+
+#include <cmath>
+#include <vector>
+#include "prob.h"
+#include "m.h"
+
+// len ~ Poisson(lambda)
+//   for (1..len)
+//     e_i ~ Uniform({Vocabulary})
+struct PoissonUniformWordModel {
+  explicit PoissonUniformWordModel(const unsigned vocab_size,
+                                   const unsigned alphabet_size,
+                                   const double mean_len = 5) :
+    lh(prob_t::One()),
+    v0(-std::log(vocab_size)),
+    u0(-std::log(alphabet_size)),
+    mean_length(mean_len) {}
+
+  void ResampleHyperparameters(MT19937*) {}
+
+  inline prob_t operator()(const std::vector<WordID>& s) const {
+    prob_t p;
+    p.logeq(Md::log_poisson(s.size(), mean_length) + s.size() * u0);
+    //p.logeq(v0);
+    return p;
+  }
+
+  inline void Increment(const std::vector<WordID>& w, MT19937*) {
+    lh *= (*this)(w);
+  }
+
+  inline void Decrement(const std::vector<WordID>& w, MT19937 *) {
+    lh /= (*this)(w);
+  }
+
+  inline prob_t Likelihood() const { return lh; }
+
+  void Summary() const {}
+
+ private:
+
+  prob_t lh;  // keeps track of the draws from the base distribution
+  const double v0;  // uniform log prob of generating a word
+  const double u0;  // uniform log prob of generating a letter
+  const double mean_length;  // mean length of a word in the base distribution
+};
+
+#endif
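PoissonUniformWordModel scores a word as Poisson(|w|; mean_len) times (1/alphabet_size)^|w|, via Md::log_poisson. A self-contained numeric check of that log probability (standalone snippet, not part of the build; the alphabet size 26 and word length 3 are arbitrary):

    #include <cmath>
    #include <cstdio>

    // log Poisson(n; lambda) = n*log(lambda) - lambda - log(n!)
    static double log_poisson(unsigned n, double lambda) {
      return n * std::log(lambda) - lambda - std::lgamma(n + 1.0);
    }

    int main() {
      const double mean_length = 5.0;     // the model's default mean_len
      const unsigned alphabet_size = 26;  // hypothetical letter inventory
      const unsigned word_len = 3;
      // log p(word) = log Poisson(|w|; 5) + |w| * log(1/26)
      const double lp = log_poisson(word_len, mean_length)
                      + word_len * -std::log((double)alphabet_size);
      std::printf("log p(word) = %.4f\n", lp);  // approx -11.74 for these values
      return 0;
    }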
diff --git a/gi/pf/pyp_lm.cc b/gi/pf/pyp_lm.cc
index 91029688..e2b67e17 100644
--- a/gi/pf/pyp_lm.cc
+++ b/gi/pf/pyp_lm.cc
@@ -25,7 +25,7 @@ using namespace std;
 using namespace tr1;
 namespace po = boost::program_options;

-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;

 void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   po::options_description opts("Configuration options");
diff --git a/gi/pf/pyp_tm.cc b/gi/pf/pyp_tm.cc
index e21f0267..6bc8a5bf 100644
--- a/gi/pf/pyp_tm.cc
+++ b/gi/pf/pyp_tm.cc
@@ -91,26 +91,23 @@ struct ConditionalPYPWordModel {
 };

 PYPLexicalTranslation::PYPLexicalTranslation(const vector<vector<WordID> >& lets,
+                                             const unsigned vocab_size,
                                              const unsigned num_letters) :
     letters(lets),
-    up0(new PYPWordModel(num_letters)),
-    tmodel(new ConditionalPYPWordModel<PYPWordModel>(up0, new FreqBinner("10k.freq"))),
+    base(vocab_size, num_letters, 5),
+    tmodel(new ConditionalPYPWordModel<PoissonUniformWordModel>(&base, new FreqBinner("10k.freq"))),
     kX(-TD::Convert("X")) {}

 void PYPLexicalTranslation::Summary() const {
   tmodel->Summary();
-  up0->Summary();
 }

 prob_t PYPLexicalTranslation::Likelihood() const {
-  prob_t p = up0->Likelihood();
-  p *= tmodel->Likelihood();
-  return p;
+  return tmodel->Likelihood() * base.Likelihood();
 }

 void PYPLexicalTranslation::ResampleHyperparameters(MT19937* rng) {
   tmodel->ResampleHyperparameters(rng);
-  up0->ResampleHyperparameters(rng);
 }

 unsigned PYPLexicalTranslation::UniqueConditioningContexts() const {
diff --git a/gi/pf/pyp_tm.h b/gi/pf/pyp_tm.h
index 63e7c96d..2b076a25 100644
--- a/gi/pf/pyp_tm.h
+++ b/gi/pf/pyp_tm.h
@@ -6,13 +6,14 @@
 #include "prob.h"
 #include "sampler.h"
 #include "freqdict.h"
+#include "poisson_uniform_word_model.h"

 struct FreqBinner;
-struct PYPWordModel;
 template <typename T, class B> struct ConditionalPYPWordModel;

 struct PYPLexicalTranslation {
   explicit PYPLexicalTranslation(const std::vector<std::vector<WordID> >& lets,
+                                 const unsigned vocab_size,
                                  const unsigned num_letters);

   prob_t Likelihood() const;
@@ -26,8 +27,8 @@ struct PYPLexicalTranslation {

  private:
   const std::vector<std::vector<WordID> >& letters;   // spelling dictionary
-  PYPWordModel* up0;  // base distribuction (model English word)
-  ConditionalPYPWordModel<PYPWordModel, FreqBinner>* tmodel;  // translation distributions
+  PoissonUniformWordModel base;  // "generator" of English types
+  ConditionalPYPWordModel<PoissonUniformWordModel, FreqBinner>* tmodel;  // translation distributions
                       // (model English word | French word)
   const WordID kX;
 };
diff --git a/gi/pf/pyp_word_model.cc b/gi/pf/pyp_word_model.cc
deleted file mode 100644
index 12df4abf..00000000
--- a/gi/pf/pyp_word_model.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-#include "pyp_word_model.h"
-
-#include <iostream>
-
-using namespace std;
-
-void PYPWordModel::ResampleHyperparameters(MT19937* rng) {
-  r.resample_hyperparameters(rng);
-  cerr << " PYPWordModel(d=" << r.discount() << ",s=" << r.strength() << ")\n";
-}
-
-void PYPWordModel::Summary() const {
-  cerr << "PYPWordModel: generations=" << r.num_customers()
-       << " PYP(d=" << r.discount() << ",s=" << r.strength() << ')' << endl;
-  for (CCRP<vector<WordID> >::const_iterator it = r.begin(); it != r.end(); ++it)
-    cerr << "   " << it->second.total_dish_count_
-              << " (on " << it->second.table_counts_.size() << " tables) "
-              << TD::GetString(it->first) << endl;
-}
-
diff --git a/gi/pf/pyp_word_model.h b/gi/pf/pyp_word_model.h
index ff366865..224a9034 100644
--- a/gi/pf/pyp_word_model.h
+++ b/gi/pf/pyp_word_model.h
@@ -11,48 +11,52 @@
 #include "os_phrase.h"

 // PYP(d,s,poisson-uniform) represented as a CRP
+template <class Base>
 struct PYPWordModel {
-  explicit PYPWordModel(const unsigned vocab_e_size, const double mean_len = 5) :
-      base(prob_t::One()), r(1,1,1,1,0.66,50.0), u0(-std::log(vocab_e_size)), mean_length(mean_len) {}
-
-  void ResampleHyperparameters(MT19937* rng);
+  explicit PYPWordModel(Base* b) :
+      base(*b),
+      r(1,1,1,1,0.66,50.0)
+    {}
+
+  void ResampleHyperparameters(MT19937* rng) {
+    r.resample_hyperparameters(rng);
+    std::cerr << " PYPWordModel(d=" << r.discount() << ",s=" << r.strength() << ")\n";
+  }

   inline prob_t operator()(const std::vector<WordID>& s) const {
-    return r.prob(s, p0(s));
+    return r.prob(s, base(s));
   }

   inline void Increment(const std::vector<WordID>& s, MT19937* rng) {
-    if (r.increment(s, p0(s), rng))
-      base *= p0(s);
+    if (r.increment(s, base(s), rng))
+      base.Increment(s, rng);
   }

   inline void Decrement(const std::vector<WordID>& s, MT19937 *rng) {
     if (r.decrement(s, rng))
-      base /= p0(s);
+      base.Decrement(s, rng);
   }

   inline prob_t Likelihood() const {
     prob_t p; p.logeq(r.log_crp_prob());
-    p *= base;
+    p *= base.Likelihood();
     return p;
   }

-  void Summary() const;
-
- private:
-  inline double logp0(const std::vector<WordID>& s) const {
-    return Md::log_poisson(s.size(), mean_length) + s.size() * u0;
+  void Summary() const {
+    std::cerr << "PYPWordModel: generations=" << r.num_customers()
+         << " PYP(d=" << r.discount() << ",s=" << r.strength() << ')' << std::endl;
+    for (typename CCRP<std::vector<WordID> >::const_iterator it = r.begin(); it != r.end(); ++it) {
+      std::cerr << "   " << it->second.total_dish_count_
+                << " (on " << it->second.table_counts_.size() << " tables) "
+                << TD::GetString(it->first) << std::endl;
+    }
   }

-  inline prob_t p0(const std::vector<WordID>& s) const {
-    prob_t p; p.logeq(logp0(s));
-    return p;
-  }
+ private:

-  prob_t base;  // keeps track of the draws from the base distribution
+  Base& base;  // keeps track of the draws from the base distribution
   CCRP<std::vector<WordID> > r;
-  const double u0;  // uniform log prob of generating a letter
-  const double mean_length;  // mean length of a word in the base distribution
 };

 #endif
return r.prob(s, base(s));    }    inline void Increment(const std::vector<WordID>& s, MT19937* rng) { -    if (r.increment(s, p0(s), rng)) -      base *= p0(s); +    if (r.increment(s, base(s), rng)) +      base.Increment(s, rng);    }    inline void Decrement(const std::vector<WordID>& s, MT19937 *rng) {      if (r.decrement(s, rng)) -      base /= p0(s); +      base.Decrement(s, rng);    }    inline prob_t Likelihood() const {      prob_t p; p.logeq(r.log_crp_prob()); -    p *= base; +    p *= base.Likelihood();      return p;    } -  void Summary() const; - - private: -  inline double logp0(const std::vector<WordID>& s) const { -    return Md::log_poisson(s.size(), mean_length) + s.size() * u0; +  void Summary() const { +    std::cerr << "PYPWordModel: generations=" << r.num_customers() +         << " PYP(d=" << r.discount() << ",s=" << r.strength() << ')' << std::endl; +    for (typename CCRP<std::vector<WordID> >::const_iterator it = r.begin(); it != r.end(); ++it) { +      std::cerr << "   " << it->second.total_dish_count_ +                << " (on " << it->second.table_counts_.size() << " tables) " +                << TD::GetString(it->first) << std::endl; +    }    } -  inline prob_t p0(const std::vector<WordID>& s) const { -    prob_t p; p.logeq(logp0(s)); -    return p; -  } + private: -  prob_t base;  // keeps track of the draws from the base distribution +  Base& base;  // keeps track of the draws from the base distribution    CCRP<std::vector<WordID> > r; -  const double u0;  // uniform log prob of generating a letter -  const double mean_length;  // mean length of a word in the base distribution  };  #endif diff --git a/gi/pf/quasi_model2.h b/gi/pf/quasi_model2.h index 588c8f84..4075affe 100644 --- a/gi/pf/quasi_model2.h +++ b/gi/pf/quasi_model2.h @@ -9,6 +9,7 @@  #include "array2d.h"  #include "slice_sampler.h"  #include "m.h" +#include "have_64_bits.h"  struct AlignmentObservation {    AlignmentObservation() : src_len(), trg_len(), j(), a_j() {} @@ -20,13 +21,23 @@ struct AlignmentObservation {    unsigned short a_j;  }; +#ifdef HAVE_64_BITS  inline size_t hash_value(const AlignmentObservation& o) {    return reinterpret_cast<const size_t&>(o);  } -  inline bool operator==(const AlignmentObservation& a, const AlignmentObservation& b) {    return hash_value(a) == hash_value(b);  } +#else +inline size_t hash_value(const AlignmentObservation& o) { +  size_t h = 1; +  boost::hash_combine(h, o.src_len); +  boost::hash_combine(h, o.trg_len); +  boost::hash_combine(h, o.j); +  boost::hash_combine(h, o.a_j); +  return h; +} +#endif  struct QuasiModel2 {    explicit QuasiModel2(double alpha, double pnull = 0.1) : diff --git a/gi/pf/tied_resampler.h b/gi/pf/tied_resampler.h index 6f45fbce..a4f4af36 100644 --- a/gi/pf/tied_resampler.h +++ b/gi/pf/tied_resampler.h @@ -78,10 +78,8 @@ struct TiedResampler {                              std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations);      std::cerr << "TiedCRPs(d=" << discount << ",s="                << strength << ") = " << LogLikelihood(discount, strength) << std::endl; -    for (typename std::set<CRP*>::iterator it = crps.begin(); it != crps.end(); ++it) { -      (*it)->set_discount(discount); -      (*it)->set_strength(strength); -    } +    for (typename std::set<CRP*>::iterator it = crps.begin(); it != crps.end(); ++it) +      (*it)->set_hyperparameters(discount, strength);    }   private:    std::set<CRP*> crps; diff --git a/klm/util/file.cc b/klm/util/file.cc index 176737fa..de206bc8 100644 --- 
a/klm/util/file.cc +++ b/klm/util/file.cc @@ -10,6 +10,7 @@  #include <sys/stat.h>  #include <fcntl.h>  #include <stdint.h> +#include <unistd.h>  #if defined(_WIN32) || defined(_WIN64)  #include <windows.h> diff --git a/klm/util/mmap.cc b/klm/util/mmap.cc index 3b1c58b8..2db35b56 100644 --- a/klm/util/mmap.cc +++ b/klm/util/mmap.cc @@ -14,6 +14,7 @@  #include <sys/types.h>  #include <sys/stat.h>  #include <stdlib.h> +#include <unistd.h>  #if defined(_WIN32) || defined(_WIN64)  #include <windows.h> diff --git a/mira/kbest_mira.cc b/mira/kbest_mira.cc index 9fda9b32..60c9ac2b 100644 --- a/mira/kbest_mira.cc +++ b/mira/kbest_mira.cc @@ -3,10 +3,10 @@  #include <vector>  #include <cassert>  #include <cmath> +#include <tr1/memory>  #include "config.h" -#include <boost/shared_ptr.hpp>  #include <boost/program_options.hpp>  #include <boost/program_options/variables_map.hpp> @@ -27,11 +27,10 @@  #include "sampler.h"  using namespace std; -using boost::shared_ptr;  namespace po = boost::program_options;  bool invert_score; -boost::shared_ptr<MT19937> rng; +std::tr1::shared_ptr<MT19937> rng;  void RandomPermutation(int len, vector<int>* p_ids) {    vector<int>& ids = *p_ids; @@ -89,15 +88,15 @@ struct HypothesisInfo {  };  struct GoodBadOracle { -  shared_ptr<HypothesisInfo> good; -  shared_ptr<HypothesisInfo> bad; +  std::tr1::shared_ptr<HypothesisInfo> good; +  std::tr1::shared_ptr<HypothesisInfo> bad;  };  struct TrainingObserver : public DecoderObserver {    TrainingObserver(const int k, const DocScorer& d, bool sf, vector<GoodBadOracle>* o) : ds(d), oracles(*o), kbest_size(k), sample_forest(sf) {}    const DocScorer& ds;    vector<GoodBadOracle>& oracles; -  shared_ptr<HypothesisInfo> cur_best; +  std::tr1::shared_ptr<HypothesisInfo> cur_best;    const int kbest_size;    const bool sample_forest; @@ -109,16 +108,16 @@ struct TrainingObserver : public DecoderObserver {      UpdateOracles(smeta.GetSentenceID(), *hg);    } -  shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score) { -    shared_ptr<HypothesisInfo> h(new HypothesisInfo); +  std::tr1::shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score) { +    std::tr1::shared_ptr<HypothesisInfo> h(new HypothesisInfo);      h->features = feats;      h->mt_metric = score;      return h;    }    void UpdateOracles(int sent_id, const Hypergraph& forest) { -    shared_ptr<HypothesisInfo>& cur_good = oracles[sent_id].good; -    shared_ptr<HypothesisInfo>& cur_bad = oracles[sent_id].bad; +    std::tr1::shared_ptr<HypothesisInfo>& cur_good = oracles[sent_id].good; +    std::tr1::shared_ptr<HypothesisInfo>& cur_bad = oracles[sent_id].bad;      cur_bad.reset();  // TODO get rid of??      
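Aside on the pointer-type swaps in this patch: kbest_mira.cc above now includes <tr1/memory> and stores std::tr1::shared_ptr, while most other files spell out boost::shared_ptr. Both changes avoid the same problem: once a using-directive for std::tr1 is in effect alongside a using-declaration for boost's template, the unqualified name shared_ptr refers to two different class templates and fails to compile. A standalone illustration of the ambiguity (not part of the patch):

#include <tr1/memory>
#include <boost/shared_ptr.hpp>

using namespace std::tr1;  // brings std::tr1::shared_ptr into scope
using boost::shared_ptr;   // and a competing boost::shared_ptr

int main() {
  // shared_ptr<int> p(new int(7));        // error: 'shared_ptr' is ambiguous
  boost::shared_ptr<int> q(new int(7));    // OK: fully qualified
  std::tr1::shared_ptr<int> r(new int(7)); // OK: fully qualified
  return (*q == *r) ? 0 : 1;
}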
if (sample_forest) { diff --git a/mteval/aer_scorer.cc b/mteval/aer_scorer.cc index edd4390f..ae3192d4 100644 --- a/mteval/aer_scorer.cc +++ b/mteval/aer_scorer.cc @@ -5,7 +5,7 @@  #include <sstream>  #include "tdict.h" -#include "alignment_pharaoh.h" +#include "alignment_io.h"  using namespace std; @@ -85,7 +85,7 @@ AERScorer::AERScorer(const vector<vector<WordID> >& refs, const string& src) : s      cerr << "AERScorer can only take a single reference!\n";      abort();    } -  ref_ = AlignmentPharaoh::ReadPharaohAlignmentGrid(TD::GetString(refs.front())); +  ref_ = AlignmentIO::ReadPharaohAlignmentGrid(TD::GetString(refs.front()));  }  static inline bool Safe(const Array2D<bool>& a, int i, int j) { @@ -101,7 +101,7 @@ ScoreP AERScorer::ScoreCCandidate(const vector<WordID>& shyp) const {  ScoreP AERScorer::ScoreCandidate(const vector<WordID>& shyp) const {    boost::shared_ptr<Array2D<bool> > hyp = -    AlignmentPharaoh::ReadPharaohAlignmentGrid(TD::GetString(shyp)); +    AlignmentIO::ReadPharaohAlignmentGrid(TD::GetString(shyp));    int m = 0;    int r = 0; diff --git a/mteval/ns.cc b/mteval/ns.cc index 788f809a..8d354677 100644 --- a/mteval/ns.cc +++ b/mteval/ns.cc @@ -14,7 +14,6 @@  #include "stringlib.h"  using namespace std; -using boost::shared_ptr;  map<string, EvaluationMetric*> EvaluationMetric::instances_; @@ -35,8 +34,8 @@ struct DefaultSegmentEvaluator : public SegmentEvaluator {    const EvaluationMetric* em_;  }; -shared_ptr<SegmentEvaluator> EvaluationMetric::CreateSegmentEvaluator(const vector<vector<WordID> >& refs) const { -  return shared_ptr<SegmentEvaluator>(new DefaultSegmentEvaluator(refs, this)); +boost::shared_ptr<SegmentEvaluator> EvaluationMetric::CreateSegmentEvaluator(const vector<vector<WordID> >& refs) const { +  return boost::shared_ptr<SegmentEvaluator>(new DefaultSegmentEvaluator(refs, this));  }  #define MAX_SS_VECTOR_SIZE 50 @@ -184,8 +183,8 @@ template <unsigned int N = 4u, BleuType BrevityType = IBM>  struct BleuMetric : public EvaluationMetric {    BleuMetric() : EvaluationMetric(BrevityType == IBM ? "IBM_BLEU" : (BrevityType == Koehn ? 
"KOEHN_BLEU" : "NIST_BLEU")) {}    unsigned SufficientStatisticsVectorSize() const { return N*2 + 2; } -  shared_ptr<SegmentEvaluator> CreateSegmentEvaluator(const vector<vector<WordID> >& refs) const { -    return shared_ptr<SegmentEvaluator>(new BleuSegmentEvaluator<N,BrevityType>(refs, this)); +  boost::shared_ptr<SegmentEvaluator> CreateSegmentEvaluator(const vector<vector<WordID> >& refs) const { +    return boost::shared_ptr<SegmentEvaluator>(new BleuSegmentEvaluator<N,BrevityType>(refs, this));    }    float ComputeBreakdown(const SufficientStats& stats, float* bp, vector<float>* out) const {      if (out) { out->clear(); } diff --git a/phrasinator/gibbs_train_plm.cc b/phrasinator/gibbs_train_plm.cc index 3b99e1b6..86fd7865 100644 --- a/phrasinator/gibbs_train_plm.cc +++ b/phrasinator/gibbs_train_plm.cc @@ -269,7 +269,7 @@ struct UniphraseLM {  int main(int argc, char** argv) {    po::variables_map conf;    InitCommandLine(argc, argv, &conf); -  shared_ptr<MT19937> prng; +  boost::shared_ptr<MT19937> prng;    if (conf.count("random_seed"))      prng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));    else diff --git a/phrasinator/gibbs_train_plm.notables.cc b/phrasinator/gibbs_train_plm.notables.cc index 4b431b90..9dca9e8d 100644 --- a/phrasinator/gibbs_train_plm.notables.cc +++ b/phrasinator/gibbs_train_plm.notables.cc @@ -293,7 +293,7 @@ struct UniphraseLM {  int main(int argc, char** argv) {    po::variables_map conf;    InitCommandLine(argc, argv, &conf); -  shared_ptr<MT19937> prng; +  boost::shared_ptr<MT19937> prng;    if (conf.count("random_seed"))      prng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));    else diff --git a/rst_parser/Makefile.am b/rst_parser/Makefile.am new file mode 100644 index 00000000..e97ab5c5 --- /dev/null +++ b/rst_parser/Makefile.am @@ -0,0 +1,19 @@ +bin_PROGRAMS = \ +  mst_train + +noinst_PROGRAMS = \ +  rst_test + +TESTS = rst_test + +noinst_LIBRARIES = librst.a + +librst_a_SOURCES = arc_factored.cc rst.cc + +mst_train_SOURCES = mst_train.cc +mst_train_LDADD = librst.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz + +rst_test_SOURCES = rst_test.cc +rst_test_LDADD = librst.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz + +AM_CPPFLAGS = -W -Wall -Wno-sign-compare $(GTEST_CPPFLAGS) -I$(top_srcdir)/decoder -I$(top_srcdir)/utils -I$(top_srcdir)/mteval -I../klm diff --git a/rst_parser/arc_factored.cc b/rst_parser/arc_factored.cc new file mode 100644 index 00000000..1e75600b --- /dev/null +++ b/rst_parser/arc_factored.cc @@ -0,0 +1,31 @@ +#include "arc_factored.h" + +#include <set> + +#include <boost/pending/disjoint_sets.hpp> + +using namespace std; +using namespace boost; + +// based on Trajan 1977 +void ArcFactoredForest::MaximumSpanningTree(SpanningTree* st) const { +  typedef disjoint_sets_with_storage<identity_property_map, identity_property_map, +      find_with_full_path_compression> DisjointSet; +  DisjointSet strongly(num_words_ + 1); +  DisjointSet weakly(num_words_ + 1); +  set<unsigned> roots, h, rset; +  vector<pair<short, short> > enter(num_words_ + 1); +  for (unsigned i = 0; i <= num_words_; ++i) { +    strongly.make_set(i); +    weakly.make_set(i); +    roots.insert(i); +  } +  while(!roots.empty()) { +    set<unsigned>::iterator it = roots.begin(); +    const unsigned k = *it; +    roots.erase(it); +    cerr << "k=" << k << 
endl; +    pair<short,short> ij; // TODO = Max(k); +  } +} + diff --git a/rst_parser/arc_factored.h b/rst_parser/arc_factored.h new file mode 100644 index 00000000..e99be482 --- /dev/null +++ b/rst_parser/arc_factored.h @@ -0,0 +1,88 @@ +#ifndef _ARC_FACTORED_H_ +#define _ARC_FACTORED_H_ + +#include <iostream> +#include <cassert> +#include <vector> +#include <utility> +#include "array2d.h" +#include "sparse_vector.h" +#include "prob.h" +#include "weights.h" + +struct SpanningTree { +  SpanningTree() : roots(1, -1) {} +  std::vector<short> roots; // unless multiroot trees are supported, this +                            // will have a single member +  std::vector<std::pair<short, short> > h_m_pairs; +}; + +class ArcFactoredForest { + public: +  explicit ArcFactoredForest(short num_words) : +      num_words_(num_words), +      root_edges_(num_words), +      edges_(num_words, num_words) { +    for (int h = 0; h < num_words; ++h) { +      for (int m = 0; m < num_words; ++m) { +        edges_(h, m).h = h + 1; +        edges_(h, m).m = m + 1; +      } +      root_edges_[h].h = 0; +      root_edges_[h].m = h + 1; +    } +  } + +  // compute the maximum spanning tree based on the current weighting +  // using the O(n^2) CLE algorithm +  void MaximumSpanningTree(SpanningTree* st) const; + +  struct Edge { +    Edge() : h(), m(), features(), edge_prob(prob_t::Zero()) {} +    short h; +    short m; +    SparseVector<weight_t> features; +    prob_t edge_prob; +  }; + +  const Edge& operator()(short h, short m) const { +    assert(m > 0); +    assert(m <= num_words_); +    assert(h >= 0); +    assert(h <= num_words_); +    return h ? edges_(h - 1, m - 1) : root_edges_[m - 1]; +  } + +  Edge& operator()(short h, short m) { +    assert(m > 0); +    assert(m <= num_words_); +    assert(h >= 0); +    assert(h <= num_words_); +    return h ? 
edges_(h - 1, m - 1) : root_edges_[m - 1]; +  } + +  template <class V> +  void Reweight(const V& weights) { +    for (int m = 0; m < num_words_; ++m) { +      for (int h = 0; h < num_words_; ++h) { +        if (h != m) { +          Edge& e = edges_(h, m); +          e.edge_prob.logeq(e.features.dot(weights)); +        } +      } +      Edge& e = root_edges_[m]; +      e.edge_prob.logeq(e.features.dot(weights)); +    } +  } + + private: +  unsigned num_words_; +  std::vector<Edge> root_edges_; +  Array2D<Edge> edges_; +}; + +inline std::ostream& operator<<(std::ostream& os, const ArcFactoredForest::Edge& edge) { +  return os << "(" << edge.h << " < " << edge.m << ")"; +} + +#endif diff --git a/rst_parser/mst_train.cc b/rst_parser/mst_train.cc new file mode 100644 index 00000000..7b5af4c1 --- /dev/null +++ b/rst_parser/mst_train.cc @@ -0,0 +1,12 @@ +#include "arc_factored.h" + +#include <iostream> + +using namespace std; + +int main(int argc, char** argv) { +  ArcFactoredForest af(5); +  cerr << af(0,3) << endl; +  return 0; +} + diff --git a/rst_parser/rst.cc b/rst_parser/rst.cc new file mode 100644 index 00000000..f6b295b3 --- /dev/null +++ b/rst_parser/rst.cc @@ -0,0 +1,7 @@ +#include "rst.h" + +using namespace std; + +StochasticForest::StochasticForest(const ArcFactoredForest& af) { +} + diff --git a/rst_parser/rst.h b/rst_parser/rst.h new file mode 100644 index 00000000..865871eb --- /dev/null +++ b/rst_parser/rst.h @@ -0,0 +1,10 @@ +#ifndef _RST_H_ +#define _RST_H_ + +#include "arc_factored.h" + +struct StochasticForest { +  explicit StochasticForest(const ArcFactoredForest& af); +}; + +#endif diff --git a/rst_parser/rst_test.cc b/rst_parser/rst_test.cc new file mode 100644 index 00000000..e8fe706e --- /dev/null +++ b/rst_parser/rst_test.cc @@ -0,0 +1,33 @@ +#include "arc_factored.h" + +#include <iostream> + +using namespace std; + +int main(int argc, char** argv) { +  // John saw Mary +  //   (H -> M) +  //   (1 -> 2) 20 +  //   (1 -> 3) 3 +  //   (2 -> 1) 20 +  //   (2 -> 3) 30 +  //   (3 -> 2) 0 +  //   (3 -> 1) 11 +  //   (0, 2) 10 +  //   (0, 1) 9 +  //   (0, 3) 9 +  ArcFactoredForest af(3); +  af(1,2).edge_prob.logeq(20); +  af(1,3).edge_prob.logeq(3); +  af(2,1).edge_prob.logeq(20); +  af(2,3).edge_prob.logeq(30); +  af(3,2).edge_prob.logeq(0); +  af(3,1).edge_prob.logeq(11); +  af(0,2).edge_prob.logeq(10); +  af(0,1).edge_prob.logeq(9); +  af(0,3).edge_prob.logeq(9); +  SpanningTree tree; +  af.MaximumSpanningTree(&tree); +  return 0; +} + diff --git a/training/mpi_batch_optimize.cc b/training/mpi_batch_optimize.cc index 046e921c..9f12dba9 100644 --- a/training/mpi_batch_optimize.cc +++ b/training/mpi_batch_optimize.cc @@ -29,7 +29,6 @@ namespace mpi = boost::mpi;  #include "sparse_vector.h"  using namespace std; -using boost::shared_ptr;  namespace po = boost::program_options;  bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { @@ -270,7 +269,7 @@ int main(int argc, char** argv) {      }      Weights::InitFromFile(conf["means"].as<string>(), &means);    } -  shared_ptr<BatchOptimizer> o; +  boost::shared_ptr<BatchOptimizer> o;    if (rank == 0) {      const string omethod = conf["optimization_method"].as<string>();      if (omethod == "rprop") diff --git a/training/mr_optimize_reduce.cc b/training/mr_optimize_reduce.cc index 15e28fa1..461e6b5f 100644 --- a/training/mr_optimize_reduce.cc +++ b/training/mr_optimize_reduce.cc @@ -15,7 +15,6 @@  #include "sparse_vector.h"  using namespace std; -using boost::shared_ptr;  namespace po = boost::program_options;  
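Note on the new rst_parser API above: rst_test.cc scores edges by setting edge_prob directly, but the intended training path is to put feature values on ArcFactoredForest::Edge::features, let Reweight() set edge_prob = exp(features . weights), and then call the (currently stubbed) CLE/Tarjan MaximumSpanningTree(). A minimal sketch of that sequence; the feature name "dist" and the all-ones weight vector are invented for illustration:

#include <iostream>
#include <vector>
#include "arc_factored.h"
#include "fdict.h"

int main() {
  ArcFactoredForest af(3);              // 3-word sentence; h = 0 is the root
  const int fid = FD::Convert("dist");  // hypothetical distance feature
  for (short h = 0; h <= 3; ++h)
    for (short m = 1; m <= 3; ++m)
      if (h != m)                       // score each head -> modifier arc
        af(h, m).features.set_value(fid, h > m ? m - h : h - m);
  std::vector<weight_t> weights(FD::NumFeats(), 1.0);
  af.Reweight(weights);                 // edge_prob = exp(features . weights)
  SpanningTree tree;
  af.MaximumSpanningTree(&tree);        // stub today; should fill tree.h_m_pairs
  std::cout << af(0, 1) << std::endl;   // operator<< prints "(0 < 1)"
  return 0;
}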
void SanityCheck(const vector<double>& w) { @@ -102,7 +101,7 @@ int main(int argc, char** argv) {      }      Weights::InitFromFile(conf["means"].as<string>(), &means);    } -  shared_ptr<BatchOptimizer> o; +  boost::shared_ptr<BatchOptimizer> o;    const string omethod = conf["optimization_method"].as<string>();    if (omethod == "rprop")      o.reset(new RPropOptimizer(num_feats));  // TODO add configuration diff --git a/training/optimize_test.cc b/training/optimize_test.cc index fe7ca70f..bff2ca03 100644 --- a/training/optimize_test.cc +++ b/training/optimize_test.cc @@ -102,7 +102,7 @@ void TestOnline() {    size_t N = 20;    double C = 1.0;    double eta0 = 0.2; -  shared_ptr<LearningRateSchedule> r(new ExponentialDecayLearningRate(N, eta0, 0.85)); +  std::tr1::shared_ptr<LearningRateSchedule> r(new ExponentialDecayLearningRate(N, eta0, 0.85));    //shared_ptr<LearningRateSchedule> r(new StandardLearningRate(N, eta0));    CumulativeL1OnlineOptimizer opt(r, N, C, std::vector<int>());    assert(r->eta(10) < r->eta(1)); diff --git a/utils/Makefile.am b/utils/Makefile.am index 3ea21835..2fc6ae21 100644 --- a/utils/Makefile.am +++ b/utils/Makefile.am @@ -23,7 +23,7 @@ atools_SOURCES = atools.cc  noinst_LIBRARIES = libutils.a  libutils_a_SOURCES = \ -  alignment_pharaoh.cc \ +  alignment_io.cc \    b64tools.cc \    corpus_tools.cc \    dict.cc \ diff --git a/utils/alignment_pharaoh.cc b/utils/alignment_io.cc index 890ff565..1d923f7f 100644 --- a/utils/alignment_pharaoh.cc +++ b/utils/alignment_io.cc @@ -1,12 +1,10 @@ -#include "utils/alignment_pharaoh.h" - -#include <set> +#include "utils/alignment_io.h"  using namespace std;  static bool is_digit(char x) { return x >= '0' && x <= '9'; } -boost::shared_ptr<Array2D<bool> > AlignmentPharaoh::ReadPharaohAlignmentGrid(const string& al) { +boost::shared_ptr<Array2D<bool> > AlignmentIO::ReadPharaohAlignmentGrid(const string& al) {    int max_x = 0;    int max_y = 0;    int i = 0; @@ -64,14 +62,36 @@ boost::shared_ptr<Array2D<bool> > AlignmentPharaoh::ReadPharaohAlignmentGrid(con    return grid;  } -void AlignmentPharaoh::SerializePharaohFormat(const Array2D<bool>& alignment, ostream* out) { +void AlignmentIO::SerializePharaohFormat(const Array2D<bool>& alignment, ostream* o) { +  ostream& out = *o;    bool need_space = false;    for (int i = 0; i < alignment.width(); ++i)      for (int j = 0; j < alignment.height(); ++j)        if (alignment(i,j)) { -        if (need_space) (*out) << ' '; else need_space = true; -        (*out) << i << '-' << j; +        if (need_space) out << ' '; else need_space = true; +        out << i << '-' << j;        } -  (*out) << endl; +  out << endl; +} + +void AlignmentIO::SerializeTypedAlignment(const Array2D<AlignmentType>& alignment, ostream* o) { +  ostream& out = *o; +  bool need_space = false; +  for (int i = 0; i < alignment.width(); ++i) +    for (int j = 0; j < alignment.height(); ++j) { +      const AlignmentType& aij = alignment(i,j); +      if (aij != kNONE) { +        if (need_space) out << ' '; else need_space = true; +        if (aij == kTRANSLATION) {} +        else if (aij == kTRANSLITERATION) { +          out << 'T' << ':'; +        } else { +          cerr << "\nUnexpected alignment point type: " << static_cast<int>(aij) << endl; +          abort(); +        } +        out << i << '-' << j; +      } +    } +  out << endl;  } diff --git a/utils/alignment_io.h b/utils/alignment_io.h new file mode 100644 index 00000000..36bcecd7 --- /dev/null +++ b/utils/alignment_io.h @@ -0,0 +1,42 @@ +#ifndef 
_ALIGNMENT_IO_H_ +#define _ALIGNMENT_IO_H_ + +#include <string> +#include <iostream> +#include <boost/shared_ptr.hpp> +#include "array2d.h" + +struct AlignmentIO { +  enum AlignmentType { kNONE = 0, kTRANSLATION = 1, kTRANSLITERATION = 2 }; + +  static boost::shared_ptr<Array2D<bool> > ReadPharaohAlignmentGrid(const std::string& al); +  static void SerializePharaohFormat(const Array2D<bool>& alignment, std::ostream* out); +  static void SerializeTypedAlignment(const Array2D<AlignmentType>& alignment, std::ostream* out); +}; + +inline std::ostream& operator<<(std::ostream& os, const Array2D<AlignmentIO::AlignmentType>& m) { +  os << ' '; +  for (int j=0; j<m.height(); ++j) +    os << (j%10); +  os << "\n"; +  for (int i=0; i<m.width(); ++i) { +    os << (i%10); +    for (int j=0; j<m.height(); ++j) { +      switch (m(i,j)) { +        case AlignmentIO::kNONE:            os << '.'; break; +        case AlignmentIO::kTRANSLATION:     os << '*'; break; +        case AlignmentIO::kTRANSLITERATION: os << '#'; break; +        default:                            os << '?'; break; +      } +    } +    os << (i%10) << "\n"; +  } +  os << ' '; +  for (int j=0; j<m.height(); ++j) +    os << (j%10); +  os << "\n"; +  return os; +} + + +#endif diff --git a/utils/alignment_pharaoh.h b/utils/alignment_pharaoh.h deleted file mode 100644 index d111c8bf..00000000 --- a/utils/alignment_pharaoh.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _PHARAOH_ALIGNMENT_H_ -#define _PHARAOH_ALIGNMENT_H_ - -#include <string> -#include <iostream> -#include <boost/shared_ptr.hpp> -#include "array2d.h" - -struct AlignmentPharaoh { -  static boost::shared_ptr<Array2D<bool> > ReadPharaohAlignmentGrid(const std::string& al); -  static void SerializePharaohFormat(const Array2D<bool>& alignment, std::ostream* out); -}; - -#endif diff --git a/utils/atools.cc b/utils/atools.cc index c0a91731..bce7822e 100644 --- a/utils/atools.cc +++ b/utils/atools.cc @@ -8,11 +8,10 @@  #include <boost/shared_ptr.hpp>  #include "filelib.h" -#include "alignment_pharaoh.h" +#include "alignment_io.h"  namespace po = boost::program_options;  using namespace std; -using boost::shared_ptr;  struct Command {    virtual ~Command() {} @@ -348,10 +347,10 @@ int main(int argc, char **argv) {        }      }      if (line1.empty() && !*in1) break; -    shared_ptr<Array2D<bool> > out(new Array2D<bool>); -    shared_ptr<Array2D<bool> > a1 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line1); +    boost::shared_ptr<Array2D<bool> > out(new Array2D<bool>); +    boost::shared_ptr<Array2D<bool> > a1 = AlignmentIO::ReadPharaohAlignmentGrid(line1);      if (in2) { -      shared_ptr<Array2D<bool> > a2 = AlignmentPharaoh::ReadPharaohAlignmentGrid(line2); +      boost::shared_ptr<Array2D<bool> > a2 = AlignmentIO::ReadPharaohAlignmentGrid(line2);        cmd.Apply(*a1, *a2, out.get());      } else {        Array2D<bool> dummy; @@ -359,7 +358,7 @@ int main(int argc, char **argv) {      }      if (cmd.Result() == 1) { -      AlignmentPharaoh::SerializePharaohFormat(*out, &cout); +      AlignmentIO::SerializePharaohFormat(*out, &cout);      }    }    if (cmd.Result() == 2) diff --git a/utils/ccrp.h b/utils/ccrp.h index 4a8b80e7..8635b422 100644 --- a/utils/ccrp.h +++ b/utils/ccrp.h @@ -55,6 +55,10 @@ class CCRP {    double discount() const { return discount_; }    double strength() const { return strength_; } +  void set_hyperparameters(double d, double s) { +    discount_ = d; strength_ = s; +    check_hyperparameters(); +  }    void set_discount(double d) { discount_ = d; 
check_hyperparameters(); }    void set_strength(double a) { strength_ = a; check_hyperparameters(); } @@ -93,8 +97,10 @@ class CCRP {    }    // returns +1 or 0 indicating whether a new table was opened +  //   p = probability with which the particular table was selected +  //       excluding p0    template <typename T> -  int increment(const Dish& dish, const T& p0, MT19937* rng) { +  int increment(const Dish& dish, const T& p0, MT19937* rng, T* p = NULL) {      DishLocations& loc = dish_locs_[dish];      bool share_table = false;      if (loc.total_dish_count_) { @@ -108,6 +114,7 @@ class CCRP {             ti != loc.table_counts_.end(); ++ti) {          r -= (*ti - discount_);          if (r <= 0.0) { +          if (p) { *p = T(*ti - discount_) / T(strength_ + num_customers_); }            ++(*ti);            break;          } @@ -119,6 +126,7 @@ class CCRP {        }      } else {        loc.table_counts_.push_back(1u); +      if (p) { *p = T(strength_ + discount_ * num_tables_) / T(strength_ + num_customers_); }        ++num_tables_;      }      ++loc.total_dish_count_; diff --git a/utils/mfcr.h b/utils/mfcr.h index 886f01ef..4aacb567 100644 --- a/utils/mfcr.h +++ b/utils/mfcr.h @@ -73,6 +73,10 @@ class MFCR {    double discount() const { return discount_; }    double strength() const { return strength_; } +  void set_hyperparameters(double d, double s) { +    discount_ = d; strength_ = s; +    check_hyperparameters(); +  }    void set_discount(double d) { discount_ = d; check_hyperparameters(); }    void set_strength(double a) { strength_ = a; check_hyperparameters(); }
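The optional out-parameter added to CCRP::increment above reports the probability of the seating decision itself, with the base factor p0 deliberately excluded so callers can track the CRP and base terms separately (set_hyperparameters, added here and in mfcr.h, is the matching one-call setter that TiedResampler now uses). A small worked sketch, assuming a fresh restaurant serving a single dish, with discount d = 0.5 and strength s = 1.0:

#include <vector>
#include "ccrp.h"
#include "prob.h"
#include "sampler.h"  // MT19937
#include "wordid.h"

int main() {
  MT19937 rng;
  CCRP<std::vector<WordID> > crp(1, 1, 1, 1, 0.5, 1.0);  // hyperpriors, d, s
  std::vector<WordID> dish(1, 42);  // an arbitrary dish
  prob_t p0(0.01);                  // stand-in base probability
  prob_t p;
  // First customer must open a table:
  //   p = (s + d*T)/(s + n) = (1.0 + 0.5*0)/(1.0 + 0) = 1.0
  crp.increment(dish, p0, &rng, &p);
  // Second customer either joins that table,
  //   p = (c - d)/(s + n) = (1 - 0.5)/(1.0 + 1) = 0.25,
  // or opens a second one, p = (s + d*1)/(1.0 + 1) = 0.75;
  // p reports whichever branch the sampler took.
  crp.increment(dish, p0, &rng, &p);
  return 0;
}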

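And a sketch of the output format of the new AlignmentIO::SerializeTypedAlignment from utils/alignment_io.cc above, assuming Array2D's usual width/height/fill constructor: translation links keep the bare Pharaoh i-j form, transliteration links get a T: prefix, and the operator<< from alignment_io.h renders the same grid with '*', '#', and '.':

#include <iostream>
#include "alignment_io.h"

int main() {
  Array2D<AlignmentIO::AlignmentType> a(2, 2, AlignmentIO::kNONE);
  a(0, 0) = AlignmentIO::kTRANSLATION;      // serialized as "0-0"
  a(1, 1) = AlignmentIO::kTRANSLITERATION;  // serialized as "T:1-1"
  AlignmentIO::SerializeTypedAlignment(a, &std::cout);  // prints: 0-0 T:1-1
  std::cerr << a;  // grid view of the same matrix
  return 0;
}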