| author | Patrick Simianer <simianer@cl.uni-heidelberg.de> | 2012-04-07 16:58:55 +0200 |
|---|---|---|
| committer | Patrick Simianer <simianer@cl.uni-heidelberg.de> | 2012-04-07 16:58:55 +0200 |
| commit | 715245dc7042ac0dca4fea94031d7c6de8058033 (patch) | |
| tree | 3a7ff0b88f2e113a08aef663d2487edec0b5f67f /gi/pf | |
| parent | 89211ab30937672d84a54fac8fa435805499e38d (diff) | |
| parent | 6001b81eba37985d2e7dea6e6ebb488b787789a6 (diff) | |
Merge remote-tracking branch 'upstream/master'
Diffstat (limited to 'gi/pf')
-rw-r--r-- | gi/pf/Makefile.am | 10
-rw-r--r-- | gi/pf/align-lexonly-pyp.cc | 10
-rw-r--r-- | gi/pf/align-tl.cc | 2
-rw-r--r-- | gi/pf/bayes_lattice_score.cc | 309
-rw-r--r-- | gi/pf/brat.cc | 2
-rw-r--r-- | gi/pf/cfg_wfst_composer.cc | 3
-rw-r--r-- | gi/pf/condnaive.cc | 2
-rw-r--r-- | gi/pf/dpnaive.cc | 2
-rw-r--r-- | gi/pf/hpyp_tm.cc | 133
-rw-r--r-- | gi/pf/hpyp_tm.h | 38
-rw-r--r-- | gi/pf/itg.cc | 2
-rw-r--r-- | gi/pf/learn_cfg.cc | 2
-rw-r--r-- | gi/pf/mh_test.cc | 148
-rw-r--r-- | gi/pf/pf_test.cc | 148
-rw-r--r-- | gi/pf/pfbrat.cc | 2
-rw-r--r-- | gi/pf/pfdist.cc | 2
-rw-r--r-- | gi/pf/pfnaive.cc | 2
-rw-r--r-- | gi/pf/poisson_uniform_word_model.h | 50
-rw-r--r-- | gi/pf/pyp_lm.cc | 2
-rw-r--r-- | gi/pf/pyp_tm.cc | 11
-rw-r--r-- | gi/pf/pyp_tm.h | 7
-rw-r--r-- | gi/pf/pyp_word_model.cc | 20
-rw-r--r-- | gi/pf/pyp_word_model.h | 46
-rw-r--r-- | gi/pf/quasi_model2.h | 13
-rw-r--r-- | gi/pf/tied_resampler.h | 6
25 files changed, 899 insertions(+), 73 deletions(-)
diff --git a/gi/pf/Makefile.am b/gi/pf/Makefile.am
index f9c979d0..86f8e07b 100644
--- a/gi/pf/Makefile.am
+++ b/gi/pf/Makefile.am
@@ -1,8 +1,14 @@
-bin_PROGRAMS = cbgi brat dpnaive pfbrat pfdist itg pfnaive condnaive align-lexonly-pyp learn_cfg pyp_lm nuisance_test align-tl
+bin_PROGRAMS = cbgi brat dpnaive pfbrat pfdist itg pfnaive condnaive align-lexonly-pyp learn_cfg pyp_lm nuisance_test align-tl pf_test bayes_lattice_score
 
 noinst_LIBRARIES = libpf.a
 
-libpf_a_SOURCES = base_distributions.cc reachability.cc cfg_wfst_composer.cc corpus.cc unigrams.cc ngram_base.cc transliterations.cc backward.cc pyp_word_model.cc pyp_tm.cc
+libpf_a_SOURCES = base_distributions.cc reachability.cc cfg_wfst_composer.cc corpus.cc unigrams.cc ngram_base.cc transliterations.cc backward.cc hpyp_tm.cc pyp_tm.cc
+
+bayes_lattice_score_SOURCES = bayes_lattice_score.cc
+bayes_lattice_score_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz
+
+pf_test_SOURCES = pf_test.cc
+pf_test_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz
 
 nuisance_test_SOURCES = nuisance_test.cc
 nuisance_test_LDADD = libpf.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a $(top_srcdir)/klm/lm/libklm.a $(top_srcdir)/klm/util/libklm_util.a -lz
diff --git a/gi/pf/align-lexonly-pyp.cc b/gi/pf/align-lexonly-pyp.cc
index 942dcf51..e7509f57 100644
--- a/gi/pf/align-lexonly-pyp.cc
+++ b/gi/pf/align-lexonly-pyp.cc
@@ -11,6 +11,7 @@
 #include "sampler.h"
 #include "corpus.h"
 #include "pyp_tm.h"
+#include "hpyp_tm.h"
 #include "quasi_model2.h"
 
 using namespace std;
@@ -61,15 +62,17 @@ struct AlignedSentencePair {
   Array2D<short> posterior;
 };
 
+template <class LexicalTranslationModel>
 struct Aligner {
   Aligner(const vector<vector<WordID> >& lets,
+          int vocab_size,
           int num_letters,
           const po::variables_map& conf,
           vector<AlignedSentencePair>* c) :
       corpus(*c),
       paj_model(conf["align_alpha"].as<double>(), conf["p_null"].as<double>()),
       infer_paj(conf.count("infer_alignment_hyperparameters") > 0),
-      model(lets, num_letters),
+      model(lets, vocab_size, num_letters),
       kNULL(TD::Convert("NULL")) {
     assert(lets[kNULL].size() == 0);
   }
@@ -77,7 +80,7 @@ struct Aligner {
   vector<AlignedSentencePair>& corpus;
   QuasiModel2 paj_model;
   const bool infer_paj;
-  PYPLexicalTranslation model;
+  LexicalTranslationModel model;
   const WordID kNULL;
 
   void ResampleHyperparameters() {
@@ -217,7 +220,8 @@ int main(int argc, char** argv) {
   ExtractLetters(vocabf, &letters, NULL);
   letters[TD::Convert("NULL")].clear();
 
-  Aligner aligner(letters, letset.size(), conf, &corpus);
+  //Aligner<PYPLexicalTranslation> aligner(letters, vocabe.size(), letset.size(), conf, &corpus);
+  Aligner<HPYPLexicalTranslation> aligner(letters, vocabe.size(), letset.size(), conf, &corpus);
   aligner.InitializeRandom();
 
   const unsigned samples = conf["samples"].as<unsigned>();
diff --git a/gi/pf/align-tl.cc b/gi/pf/align-tl.cc
index cbe8c6c8..f6608f1d 100644
--- a/gi/pf/align-tl.cc
+++ b/gi/pf/align-tl.cc
@@ -58,7 +58,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   }
 }
 
-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;
 
 struct LexicalAlignment {
   unsigned char src_index;
diff --git a/gi/pf/bayes_lattice_score.cc b/gi/pf/bayes_lattice_score.cc
new file mode 100644
index 00000000..70cb8dc2
--- /dev/null
+++ b/gi/pf/bayes_lattice_score.cc
@@ -0,0 +1,309 @@
+#include <iostream>
+#include <queue>
+
+#include <boost/functional.hpp>
+#include <boost/program_options.hpp>
+#include <boost/program_options/variables_map.hpp>
+
+#include "inside_outside.h"
+#include "hg.h"
+#include "hg_io.h"
+#include "bottom_up_parser.h"
+#include "fdict.h"
+#include "grammar.h"
+#include "m.h"
+#include "trule.h"
+#include "tdict.h"
+#include "filelib.h"
+#include "dict.h"
+#include "sampler.h"
+#include "ccrp.h"
+#include "ccrp_onetable.h"
+
+using namespace std;
+using namespace tr1;
+namespace po = boost::program_options;
+
+boost::shared_ptr<MT19937> prng;
+
+void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
+  po::options_description opts("Configuration options");
+  opts.add_options()
+        ("samples,s",po::value<unsigned>()->default_value(1000),"Number of samples")
+        ("input,i",po::value<string>(),"Read parallel data from")
+        ("random_seed,S",po::value<uint32_t>(), "Random seed");
+  po::options_description clo("Command line options");
+  clo.add_options()
+        ("config", po::value<string>(), "Configuration file")
+        ("help", "Print this help message and exit");
+  po::options_description dconfig_options, dcmdline_options;
+  dconfig_options.add(opts);
+  dcmdline_options.add(opts).add(clo);
+
+  po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
+  if (conf->count("config")) {
+    ifstream config((*conf)["config"].as<string>().c_str());
+    po::store(po::parse_config_file(config, dconfig_options), *conf);
+  }
+  po::notify(*conf);
+
+  if (conf->count("help") || (conf->count("input") == 0)) {
+    cerr << dcmdline_options << endl;
+    exit(1);
+  }
+}
+
+unsigned ReadCorpus(const string& filename,
+                    vector<Lattice>* e,
+                    set<WordID>* vocab_e) {
+  e->clear();
+  vocab_e->clear();
+  ReadFile rf(filename);
+  istream* in = rf.stream();
+  assert(*in);
+  string line;
+  unsigned toks = 0;
+  while(*in) {
+    getline(*in, line);
+    if (line.empty() && !*in) break;
+    e->push_back(Lattice());
+    Lattice& le = e->back();
+    LatticeTools::ConvertTextOrPLF(line, & le);
+    for (unsigned i = 0; i < le.size(); ++i)
+      for (unsigned j = 0; j < le[i].size(); ++j)
+        vocab_e->insert(le[i][j].label);
+    toks += le.size();
+  }
+  return toks;
+}
+
+struct BaseModel {
+  explicit BaseModel(unsigned tc) :
+      unif(1.0 / tc), p(prob_t::One()) {}
+  prob_t prob(const TRule& r) const {
+    return unif;
+  }
+  void increment(const TRule& r, MT19937* rng) {
+    p *= prob(r);
+  }
+  void decrement(const TRule& r, MT19937* rng) {
+    p /= prob(r);
+  }
+  prob_t Likelihood() const {
+    return p;
+  }
+  const prob_t unif;
+  prob_t p;
+};
+
+struct UnigramModel {
+  explicit UnigramModel(unsigned tc) : base(tc), crp(1,1,1,1), glue(1,1,1,1) {}
+  BaseModel base;
+  CCRP<TRule> crp;
+  CCRP<TRule> glue;
+
+  prob_t Prob(const TRule& r) const {
+    if (r.Arity() != 0) {
+      return glue.prob(r, prob_t(0.5));
+    }
+    return crp.prob(r, base.prob(r));
+  }
+
+  int Increment(const TRule& r, MT19937* rng) {
+    if (r.Arity() != 0) {
+      glue.increment(r, 0.5, rng);
+      return 0;
+    } else {
+      if (crp.increment(r, base.prob(r), rng)) {
+        base.increment(r, rng);
+        return 1;
+      }
+      return 0;
+    }
+  }
+
+  int Decrement(const TRule& r, MT19937* rng) {
+    if (r.Arity() != 0) {
+      glue.decrement(r, rng);
+      return 0;
+    } else {
+      if (crp.decrement(r, rng)) {
+        base.decrement(r, rng);
+        return -1;
+      }
+      return 0;
+    }
+  }
+
+  prob_t Likelihood() const {
+    prob_t p;
+    p.logeq(crp.log_crp_prob() + glue.log_crp_prob());
+    p *= base.Likelihood();
+    return p;
+  }
+
+  void ResampleHyperparameters(MT19937* rng) {
+    crp.resample_hyperparameters(rng);
+    glue.resample_hyperparameters(rng);
+    cerr << "  d=" << crp.discount() << ", s=" << crp.strength() << "\t STOP d=" << glue.discount() << ", s=" << glue.strength() << endl;
+  }
+};
+
+UnigramModel* plm;
+
+void SampleDerivation(const Hypergraph& hg, MT19937* rng, vector<unsigned>* sampled_deriv) {
+  vector<prob_t> node_probs;
+  Inside<prob_t, EdgeProb>(hg, &node_probs);
+  queue<unsigned> q;
+  q.push(hg.nodes_.size() - 2);
+  while(!q.empty()) {
+    unsigned cur_node_id = q.front();
+//    cerr << "NODE=" << cur_node_id << endl;
+    q.pop();
+    const Hypergraph::Node& node = hg.nodes_[cur_node_id];
+    const unsigned num_in_edges = node.in_edges_.size();
+    unsigned sampled_edge = 0;
+    if (num_in_edges == 1) {
+      sampled_edge = node.in_edges_[0];
+    } else {
+      //prob_t z;
+      assert(num_in_edges > 1);
+      SampleSet<prob_t> ss;
+      for (unsigned j = 0; j < num_in_edges; ++j) {
+        const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]];
+        prob_t p = edge.edge_prob_;
+        for (unsigned k = 0; k < edge.tail_nodes_.size(); ++k)
+          p *= node_probs[edge.tail_nodes_[k]];
+        ss.add(p);
+//        cerr << log(ss[j]) << " ||| " << edge.rule_->AsString() << endl;
+        //z += p;
+      }
+//      for (unsigned j = 0; j < num_in_edges; ++j) {
+//        const Hypergraph::Edge& edge = hg.edges_[node.in_edges_[j]];
+//        cerr << exp(log(ss[j] / z)) << " ||| " << edge.rule_->AsString() << endl;
+//      }
+//      cerr << " --- \n";
+      sampled_edge = node.in_edges_[rng->SelectSample(ss)];
+    }
+    sampled_deriv->push_back(sampled_edge);
+    const Hypergraph::Edge& edge = hg.edges_[sampled_edge];
+    for (unsigned j = 0; j < edge.tail_nodes_.size(); ++j) {
+      q.push(edge.tail_nodes_[j]);
+    }
+  }
+//  for (unsigned i = 0; i < sampled_deriv->size(); ++i) {
+//    cerr << *hg.edges_[(*sampled_deriv)[i]].rule_ << endl;
+//  }
+}
+
+void IncrementDerivation(const Hypergraph& hg, const vector<unsigned>& d, UnigramModel* plm, MT19937* rng) {
+  for (unsigned i = 0; i < d.size(); ++i)
+    plm->Increment(*hg.edges_[d[i]].rule_, rng);
+}
+
+void DecrementDerivation(const Hypergraph& hg, const vector<unsigned>& d, UnigramModel* plm, MT19937* rng) {
+  for (unsigned i = 0; i < d.size(); ++i)
+    plm->Decrement(*hg.edges_[d[i]].rule_, rng);
+}
+
+prob_t TotalProb(const Hypergraph& hg) {
+  return Inside<prob_t, EdgeProb>(hg);
+}
+
+void IncrementLatticePath(const Hypergraph& hg, const vector<unsigned>& d, Lattice* pl) {
+  Lattice& lat = *pl;
+  for (int i = 0; i < d.size(); ++i) {
+    const Hypergraph::Edge& edge = hg.edges_[d[i]];
+    if (edge.rule_->Arity() != 0) continue;
+    WordID sym = edge.rule_->e_[0];
+    vector<LatticeArc>& las = lat[edge.i_];
+    int dist = edge.j_ - edge.i_;
+    assert(dist > 0);
+    for (int j = 0; j < las.size(); ++j) {
+      if (las[j].dist2next == dist &&
+          las[j].label == sym) {
+        las[j].cost += 1;
+      }
+    }
+  }
+}
+
+int main(int argc, char** argv) {
+  po::variables_map conf;
+
+  InitCommandLine(argc, argv, &conf);
+  vector<GrammarPtr> grammars(2);
+  grammars[0].reset(new GlueGrammar("S","X"));
+  const unsigned samples = conf["samples"].as<unsigned>();
+
+  if (conf.count("random_seed"))
+    prng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
+  else
+    prng.reset(new MT19937);
+  MT19937& rng = *prng;
+  vector<Lattice> corpuse;
+  set<WordID> vocabe;
+  cerr << "Reading corpus...\n";
+  const unsigned toks = ReadCorpus(conf["input"].as<string>(), &corpuse, &vocabe);
+  cerr << "E-corpus size: " << corpuse.size() << " lattices\t (" << vocabe.size() << " word types)\n";
+  UnigramModel lm(vocabe.size());
+  vector<Hypergraph> hgs(corpuse.size());
+  vector<vector<unsigned> > derivs(corpuse.size());
+  for (int i = 0; i < corpuse.size(); ++i) {
+    grammars[1].reset(new PassThroughGrammar(corpuse[i], "X"));
+    ExhaustiveBottomUpParser parser("S", grammars);
+    bool res = parser.Parse(corpuse[i], &hgs[i]);  // exhaustive parse
+    assert(res);
+  }
+
+  double csamples = 0;
+  for (int SS=0; SS < samples; ++SS) {
+    const bool is_last = ((samples - 1) == SS);
+    prob_t dlh = prob_t::One();
+    bool record_sample = (SS > (samples * 1 / 3) && (SS % 5 == 3));
+    if (record_sample) csamples++;
+    for (int ci = 0; ci < corpuse.size(); ++ci) {
+      Lattice& lat = corpuse[ci];
+      Hypergraph& hg = hgs[ci];
+      vector<unsigned>& d = derivs[ci];
+      if (!is_last) DecrementDerivation(hg, d, &lm, &rng);
+      for (unsigned i = 0; i < hg.edges_.size(); ++i) {
+        TRule& r = *hg.edges_[i].rule_;
+        if (r.Arity() != 0)
+          hg.edges_[i].edge_prob_ = prob_t::One();
+        else
+          hg.edges_[i].edge_prob_ = lm.Prob(r);
+      }
+      if (!is_last) {
+        d.clear();
+        SampleDerivation(hg, &rng, &d);
+        IncrementDerivation(hg, derivs[ci], &lm, &rng);
+      } else {
+        prob_t p = TotalProb(hg);
+        dlh *= p;
+        cerr << " p(sentence) = " << log(p) << "\t" << log(dlh) << endl;
+      }
+      if (record_sample) IncrementLatticePath(hg, derivs[ci], &lat);
+    }
+    double llh = log(lm.Likelihood());
+    cerr << "LLH=" << llh << "\tENTROPY=" << (-llh / log(2) / toks) << "\tPPL=" << pow(2, -llh / log(2) / toks) << endl;
+    if (SS % 10 == 9) lm.ResampleHyperparameters(&rng);
+    if (is_last) {
+      double z = log(dlh);
+      cerr << "TOTAL_PROB=" << z << "\tENTROPY=" << (-z / log(2) / toks) << "\tPPL=" << pow(2, -z / log(2) / toks) << endl;
+    }
+  }
+  cerr << lm.crp << endl;
+  cerr << lm.glue << endl;
+  for (int i = 0; i < corpuse.size(); ++i) {
+    for (int j = 0; j < corpuse[i].size(); ++j)
+      for (int k = 0; k < corpuse[i][j].size(); ++k) {
+        corpuse[i][j][k].cost /= csamples;
+        corpuse[i][j][k].cost += 1e-3;
+        corpuse[i][j][k].cost = log(corpuse[i][j][k].cost);
+      }
+    cout << HypergraphIO::AsPLF(corpuse[i]) << endl;
+  }
+  return 0;
+}
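For orientation: SampleDerivation in the new bayes_lattice_score.cc draws a derivation top-down by ancestral sampling from inside scores. At each node $v$, an incoming edge $e$ is chosen with probability proportional to its edge probability times the inside scores of its tail nodes,

$$P(e \mid v) \;=\; \frac{p(e) \prod_{u \in \mathrm{tails}(e)} I(u)}{I(v)},$$

where $I(\cdot)$ is the inside score computed by `Inside<prob_t, EdgeProb>`; the `SampleSet`/`SelectSample` pair implements the proportional draw.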
diff --git a/gi/pf/brat.cc b/gi/pf/brat.cc
index c2c52760..832f22cf 100644
--- a/gi/pf/brat.cc
+++ b/gi/pf/brat.cc
@@ -489,7 +489,7 @@ int main(int argc, char** argv) {
     cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n";
     return 1;
   }
-  shared_ptr<MT19937> prng;
+  boost::shared_ptr<MT19937> prng;
   if (conf.count("random_seed"))
     prng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
   else
diff --git a/gi/pf/cfg_wfst_composer.cc b/gi/pf/cfg_wfst_composer.cc
index a31b5be8..20520c81 100644
--- a/gi/pf/cfg_wfst_composer.cc
+++ b/gi/pf/cfg_wfst_composer.cc
@@ -16,7 +16,6 @@
 #include "tdict.h"
 #include "hg.h"
 
-using boost::shared_ptr;
 namespace po = boost::program_options;
 using namespace std;
 using namespace std::tr1;
@@ -114,7 +113,7 @@ struct Edge {
   const Edge* const active_parent;   // back pointer, NULL for PREDICT items
   const Edge* const passive_parent;  // back pointer, NULL for SCAN and PREDICT items
   TRulePtr tps;                      // translations
-  shared_ptr<SparseVector<double> > features; // features from CFG rule
+  boost::shared_ptr<SparseVector<double> > features; // features from CFG rule
 
   bool IsPassive() const {
     // when a rule is completed, this value will be set
diff --git a/gi/pf/condnaive.cc b/gi/pf/condnaive.cc
index 3ea88016..419731ac 100644
--- a/gi/pf/condnaive.cc
+++ b/gi/pf/condnaive.cc
@@ -55,7 +55,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   }
 }
 
-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;
 
 struct ModelAndData {
   explicit ModelAndData(ConditionalParallelSegementationModel<PhraseConditionalBase>& m, const vector<vector<int> >& ce, const vector<vector<int> >& cf, const set<int>& ve, const set<int>& vf) :
diff --git a/gi/pf/dpnaive.cc b/gi/pf/dpnaive.cc
index 469dff5c..75ccad72 100644
--- a/gi/pf/dpnaive.cc
+++ b/gi/pf/dpnaive.cc
@@ -55,7 +55,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   }
 }
 
-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;
 
 template <typename Base>
 struct ModelAndData {
diff --git a/gi/pf/hpyp_tm.cc b/gi/pf/hpyp_tm.cc
new file mode 100644
index 00000000..784f9958
--- /dev/null
+++ b/gi/pf/hpyp_tm.cc
@@ -0,0 +1,133 @@
+#include "hpyp_tm.h"
+
+#include <tr1/unordered_map>
+#include <iostream>
+#include <queue>
+
+#include "tdict.h"
+#include "ccrp.h"
+#include "pyp_word_model.h"
+#include "tied_resampler.h"
+
+using namespace std;
+using namespace std::tr1;
+
+struct FreqBinner {
+  FreqBinner(const std::string& fname) { fd_.Load(fname); }
+  unsigned NumberOfBins() const { return fd_.Max() + 1; }
+  unsigned Bin(const WordID& w) const { return fd_.LookUp(w); }
+  FreqDict<unsigned> fd_;
+};
+
+template <typename Base, class Binner = FreqBinner>
+struct ConditionalPYPWordModel {
+  ConditionalPYPWordModel(Base* b, const Binner* bnr = NULL) :
+      base(*b),
+      binner(bnr),
+      btr(binner ? binner->NumberOfBins() + 1u : 2u) {}
+
+  void Summary() const {
+    cerr << "Number of conditioning contexts: " << r.size() << endl;
+    for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) {
+      cerr << TD::Convert(it->first) << "   \tPYP(d=" << it->second.discount() << ",s=" << it->second.strength() << ") --------------------------" << endl;
+      for (CCRP<vector<WordID> >::const_iterator i2 = it->second.begin(); i2 != it->second.end(); ++i2)
+        cerr << "   " << i2->second.total_dish_count_ << '\t' << TD::GetString(i2->first) << endl;
+    }
+  }
+
+  void ResampleHyperparameters(MT19937* rng) {
+    btr.ResampleHyperparameters(rng);
+  }
+
+  prob_t Prob(const WordID src, const vector<WordID>& trglets) const {
+    RuleModelHash::const_iterator it = r.find(src);
+    if (it == r.end()) {
+      return base(trglets);
+    } else {
+      return it->second.prob(trglets, base(trglets));
+    }
+  }
+
+  void Increment(const WordID src, const vector<WordID>& trglets, MT19937* rng) {
+    RuleModelHash::iterator it = r.find(src);
+    if (it == r.end()) {
+      it = r.insert(make_pair(src, CCRP<vector<WordID> >(0.5,1.0))).first;
+      static const WordID kNULL = TD::Convert("NULL");
+      unsigned bin = (src == kNULL ? 0 : 1);
+      if (binner && bin) { bin = binner->Bin(src) + 1; }
+      btr.Add(bin, &it->second);
+    }
+    if (it->second.increment(trglets, base(trglets), rng))
+      base.Increment(trglets, rng);
+  }
+
+  void Decrement(const WordID src, const vector<WordID>& trglets, MT19937* rng) {
+    RuleModelHash::iterator it = r.find(src);
+    assert(it != r.end());
+    if (it->second.decrement(trglets, rng)) {
+      base.Decrement(trglets, rng);
+    }
+  }
+
+  prob_t Likelihood() const {
+    prob_t p = prob_t::One();
+    for (RuleModelHash::const_iterator it = r.begin(); it != r.end(); ++it) {
+      prob_t q; q.logeq(it->second.log_crp_prob());
+      p *= q;
+    }
+    return p;
+  }
+
+  unsigned UniqueConditioningContexts() const {
+    return r.size();
+  }
+
+  // TODO tie PYP hyperparameters based on source word frequency bins
+  Base& base;
+  const Binner* binner;
+  BinTiedResampler<CCRP<vector<WordID> > > btr;
+  typedef unordered_map<WordID, CCRP<vector<WordID> > > RuleModelHash;
+  RuleModelHash r;
+};
+
+HPYPLexicalTranslation::HPYPLexicalTranslation(const vector<vector<WordID> >& lets,
+                                               const unsigned vocab_size,
+                                               const unsigned num_letters) :
+    letters(lets),
+    base(vocab_size, num_letters, 5),
+    up0(new PYPWordModel<PoissonUniformWordModel>(&base)),
+    tmodel(new ConditionalPYPWordModel<PYPWordModel<PoissonUniformWordModel> >(up0, new FreqBinner("10k.freq"))),
+    kX(-TD::Convert("X")) {}
+
+void HPYPLexicalTranslation::Summary() const {
+  tmodel->Summary();
+  up0->Summary();
+}
+
+prob_t HPYPLexicalTranslation::Likelihood() const {
+  prob_t p = up0->Likelihood();
+  p *= tmodel->Likelihood();
+  return p;
+}
+
+void HPYPLexicalTranslation::ResampleHyperparameters(MT19937* rng) {
+  tmodel->ResampleHyperparameters(rng);
+  up0->ResampleHyperparameters(rng);
+}
+
+unsigned HPYPLexicalTranslation::UniqueConditioningContexts() const {
+  return tmodel->UniqueConditioningContexts();
+}
+
+prob_t HPYPLexicalTranslation::Prob(WordID src, WordID trg) const {
+  return tmodel->Prob(src, letters[trg]);
+}
+
+void HPYPLexicalTranslation::Increment(WordID src, WordID trg, MT19937* rng) {
+  tmodel->Increment(src, letters[trg], rng);
+}
+
+void HPYPLexicalTranslation::Decrement(WordID src, WordID trg, MT19937* rng) {
+  tmodel->Decrement(src, letters[trg], rng);
+}
diff --git a/gi/pf/hpyp_tm.h b/gi/pf/hpyp_tm.h
new file mode 100644
index 00000000..af3215ba
--- /dev/null
+++ b/gi/pf/hpyp_tm.h
@@ -0,0 +1,38 @@
+#ifndef HPYP_LEX_TRANS
+#define HPYP_LEX_TRANS
+
+#include <vector>
+#include "wordid.h"
+#include "prob.h"
+#include "sampler.h"
+#include "freqdict.h"
+#include "poisson_uniform_word_model.h"
+
+struct FreqBinner;
+template <class B> struct PYPWordModel;
+template <typename T, class B> struct ConditionalPYPWordModel;
+
+struct HPYPLexicalTranslation {
+  explicit HPYPLexicalTranslation(const std::vector<std::vector<WordID> >& lets,
+                                  const unsigned vocab_size,
+                                  const unsigned num_letters);
+
+  prob_t Likelihood() const;
+
+  void ResampleHyperparameters(MT19937* rng);
+  prob_t Prob(WordID src, WordID trg) const;  // return p(trg | src)
+  void Summary() const;
+  void Increment(WordID src, WordID trg, MT19937* rng);
+  void Decrement(WordID src, WordID trg, MT19937* rng);
+  unsigned UniqueConditioningContexts() const;
+
+ private:
+  const std::vector<std::vector<WordID> >& letters;  // spelling dictionary
+  PoissonUniformWordModel base;  // "generator" of English types
+  PYPWordModel<PoissonUniformWordModel>* up0;  // model English lexicon
+  ConditionalPYPWordModel<PYPWordModel<PoissonUniformWordModel>, FreqBinner>* tmodel;  // translation distributions
+                  // (model English word | French word)
+  const WordID kX;
+};
+
+#endif
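A minimal usage sketch of the HPYPLexicalTranslation interface declared above. This is illustrative only: the tokens, sizes, and the `example` wrapper are invented here, `letters[w]` must spell out WordID `w` with an empty entry for "NULL" (as asserted in align-lexonly-pyp.cc), and the constructor's FreqBinner expects a `10k.freq` frequency file in the working directory.

```cpp
#include <vector>
#include "hpyp_tm.h"
#include "tdict.h"
#include "sampler.h"

// Hypothetical driver illustrating the increment/prob/decrement cycle.
void example(const std::vector<std::vector<WordID> >& letters) {
  MT19937 rng;
  HPYPLexicalTranslation tm(letters, /*vocab_size=*/50000, /*num_letters=*/26);
  const WordID src = TD::Convert("chien");
  const WordID trg = TD::Convert("dog");
  tm.Increment(src, trg, &rng);      // seat an observed (src, trg) pair
  prob_t p = tm.Prob(src, trg);      // predictive p(trg | src)
  tm.ResampleHyperparameters(&rng);  // resample tied PYP discounts/strengths
  tm.Decrement(src, trg, &rng);      // unseat the pair again
  (void)p;
}
```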
diff --git a/gi/pf/itg.cc b/gi/pf/itg.cc
index a38fe672..29ec3860 100644
--- a/gi/pf/itg.cc
+++ b/gi/pf/itg.cc
@@ -231,7 +231,7 @@ int main(int argc, char** argv) {
     cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n";
     return 1;
   }
-  shared_ptr<MT19937> prng;
+  boost::shared_ptr<MT19937> prng;
   if (conf.count("random_seed"))
     prng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
   else
diff --git a/gi/pf/learn_cfg.cc b/gi/pf/learn_cfg.cc
index ed1772bf..44eaa162 100644
--- a/gi/pf/learn_cfg.cc
+++ b/gi/pf/learn_cfg.cc
@@ -24,7 +24,7 @@ using namespace std;
 using namespace tr1;
 namespace po = boost::program_options;
 
-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;
 
 vector<int> nt_vocab;
 vector<int> nt_id_to_index;
 static unsigned kMAX_RULE_SIZE = 0;
diff --git a/gi/pf/mh_test.cc b/gi/pf/mh_test.cc
new file mode 100644
index 00000000..296e7285
--- /dev/null
+++ b/gi/pf/mh_test.cc
@@ -0,0 +1,148 @@
+#include "ccrp.h"
+
+#include <vector>
+#include <iostream>
+
+#include "tdict.h"
+#include "transliterations.h"
+
+using namespace std;
+
+MT19937 rng;
+
+static bool verbose = false;
+
+struct Model {
+
+  Model() : bp(), base(0.2, 0.6) , ccrps(5, CCRP<int>(0.8, 0.5)) {}
+
+  double p0(int x) const {
+    assert(x > 0);
+    assert(x < 5);
+    return 1.0/4.0;
+  }
+
+  double llh() const {
+    double lh = bp + base.log_crp_prob();
+    for (int ctx = 1; ctx < 5; ++ctx)
+      lh += ccrps[ctx].log_crp_prob();
+    return lh;
+  }
+
+  double prob(int ctx, int x) const {
+    assert(ctx > 0 && ctx < 5);
+    return ccrps[ctx].prob(x, base.prob(x, p0(x)));
+  }
+
+  void increment(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    if (ccrps[ctx].increment(x, base.prob(x, p0(x)), &rng)) {
+      if (base.increment(x, p0(x), &rng)) {
+        bp += log(1.0 / 4.0);
+      }
+    }
+  }
+
+  // this is just a biased estimate
+  double est_base_prob(int x) {
+    return (x + 1) * x / 40.0;
+  }
+
+  void increment_is(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    SampleSet<double> ss;
+    const int PARTICLES = 25;
+    vector<CCRP<int> > s1s(PARTICLES, CCRP<int>(0.5,0.5));
+    vector<CCRP<int> > sbs(PARTICLES, CCRP<int>(0.5,0.5));
+    vector<double> sp0s(PARTICLES);
+
+    CCRP<int> s1 = ccrps[ctx];
+    CCRP<int> sb = base;
+    double sp0 = bp;
+    for (int pp = 0; pp < PARTICLES; ++pp) {
+      if (pp > 0) {
+        ccrps[ctx] = s1;
+        base = sb;
+        bp = sp0;
+      }
+
+      double q = 1;
+      double gamma = 1;
+      double est_p = est_base_prob(x);
+      //base.prob(x, p0(x)) + rng.next() * 0.1;
+      if (ccrps[ctx].increment(x, est_p, &rng, &q)) {
+        gamma = q * base.prob(x, p0(x));
+        q *= est_p;
+        if (verbose) cerr << "(DP-base draw) ";
+        double qq = -1;
+        if (base.increment(x, p0(x), &rng, &qq)) {
+          if (verbose) cerr << "(G0 draw) ";
+          bp += log(p0(x));
+          qq *= p0(x);
+        }
+      } else { gamma = q; }
+      double w = gamma / q;
+      if (verbose)
+        cerr << "gamma=" << gamma << " q=" << q << "\tw=" << w << endl;
+      ss.add(w);
+      s1s[pp] = ccrps[ctx];
+      sbs[pp] = base;
+      sp0s[pp] = bp;
+    }
+    int ps = rng.SelectSample(ss);
+    ccrps[ctx] = s1s[ps];
+    base = sbs[ps];
+    bp = sp0s[ps];
+    if (verbose) {
+      cerr << "SELECTED: " << ps << endl;
+      static int cc = 0; cc++; if (cc ==10) exit(1);
+    }
+  }
+
+  void decrement(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    if (ccrps[ctx].decrement(x, &rng)) {
+      if (base.decrement(x, &rng)) {
+        bp -= log(p0(x));
+      }
+    }
+  }
+
+  double bp;
+  CCRP<int> base;
+  vector<CCRP<int> > ccrps;
+
+};
+
+int main(int argc, char** argv) {
+  if (argc > 1) { verbose = true; }
+  vector<int> counts(15, 0);
+  vector<int> tcounts(15, 0);
+  int points[] = {1,2, 2,2, 3,2, 4,1, 3, 4, 3, 3, 2, 3, 4, 1, 4, 1, 3, 2, 1, 3, 1, 4, 0, 0};
+  double tlh = 0;
+  double tt = 0;
+  for (int n = 0; n < 1000; ++n) {
+    if (n % 10 == 0) cerr << '.';
+    if ((n+1) % 400 == 0) cerr << " [" << (n+1) << "]\n";
+    Model m;
+    for (int *x = points; *x; x += 2)
+      m.increment(x[0], x[1]);
+
+    for (int j = 0; j < 24; ++j) {
+      for (int *x = points; *x; x += 2) {
+        if (rng.next() < 0.8) {
+          m.decrement(x[0], x[1]);
+          m.increment_is(x[0], x[1]);
+        }
+      }
+    }
+    counts[m.base.num_customers()]++;
+    tcounts[m.base.num_tables()]++;
+    tlh += m.llh();
+    tt += 1.0;
+  }
+  cerr << "mean LLH = " << (tlh / tt) << endl;
+  for (int i = 0; i < 15; ++i)
+    cerr << i << ": " << (counts[i] / tt) << "\t" << (tcounts[i] / tt) << endl;
+}
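For reference, the particle step in increment_is (identical in mh_test.cc and pf_test.cc below) is a standard importance-sampling correction: each particle seats the new customer under a deliberately biased proposal (est_base_prob), records the proposal probability $q$ and the true incremental probability $\gamma$, and one particle is then resampled with probability proportional to the weight

$$w \;=\; \frac{\gamma}{q},$$

which is exactly what the `double w = gamma / q; ss.add(w);` lines compute before `rng.SelectSample(ss)` picks the surviving particle.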
diff --git a/gi/pf/pf_test.cc b/gi/pf/pf_test.cc
new file mode 100644
index 00000000..296e7285
--- /dev/null
+++ b/gi/pf/pf_test.cc
@@ -0,0 +1,148 @@
+#include "ccrp.h"
+
+#include <vector>
+#include <iostream>
+
+#include "tdict.h"
+#include "transliterations.h"
+
+using namespace std;
+
+MT19937 rng;
+
+static bool verbose = false;
+
+struct Model {
+
+  Model() : bp(), base(0.2, 0.6) , ccrps(5, CCRP<int>(0.8, 0.5)) {}
+
+  double p0(int x) const {
+    assert(x > 0);
+    assert(x < 5);
+    return 1.0/4.0;
+  }
+
+  double llh() const {
+    double lh = bp + base.log_crp_prob();
+    for (int ctx = 1; ctx < 5; ++ctx)
+      lh += ccrps[ctx].log_crp_prob();
+    return lh;
+  }
+
+  double prob(int ctx, int x) const {
+    assert(ctx > 0 && ctx < 5);
+    return ccrps[ctx].prob(x, base.prob(x, p0(x)));
+  }
+
+  void increment(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    if (ccrps[ctx].increment(x, base.prob(x, p0(x)), &rng)) {
+      if (base.increment(x, p0(x), &rng)) {
+        bp += log(1.0 / 4.0);
+      }
+    }
+  }
+
+  // this is just a biased estimate
+  double est_base_prob(int x) {
+    return (x + 1) * x / 40.0;
+  }
+
+  void increment_is(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    SampleSet<double> ss;
+    const int PARTICLES = 25;
+    vector<CCRP<int> > s1s(PARTICLES, CCRP<int>(0.5,0.5));
+    vector<CCRP<int> > sbs(PARTICLES, CCRP<int>(0.5,0.5));
+    vector<double> sp0s(PARTICLES);
+
+    CCRP<int> s1 = ccrps[ctx];
+    CCRP<int> sb = base;
+    double sp0 = bp;
+    for (int pp = 0; pp < PARTICLES; ++pp) {
+      if (pp > 0) {
+        ccrps[ctx] = s1;
+        base = sb;
+        bp = sp0;
+      }
+
+      double q = 1;
+      double gamma = 1;
+      double est_p = est_base_prob(x);
+      //base.prob(x, p0(x)) + rng.next() * 0.1;
+      if (ccrps[ctx].increment(x, est_p, &rng, &q)) {
+        gamma = q * base.prob(x, p0(x));
+        q *= est_p;
+        if (verbose) cerr << "(DP-base draw) ";
+        double qq = -1;
+        if (base.increment(x, p0(x), &rng, &qq)) {
+          if (verbose) cerr << "(G0 draw) ";
+          bp += log(p0(x));
+          qq *= p0(x);
+        }
+      } else { gamma = q; }
+      double w = gamma / q;
+      if (verbose)
+        cerr << "gamma=" << gamma << " q=" << q << "\tw=" << w << endl;
+      ss.add(w);
+      s1s[pp] = ccrps[ctx];
+      sbs[pp] = base;
+      sp0s[pp] = bp;
+    }
+    int ps = rng.SelectSample(ss);
+    ccrps[ctx] = s1s[ps];
+    base = sbs[ps];
+    bp = sp0s[ps];
+    if (verbose) {
+      cerr << "SELECTED: " << ps << endl;
+      static int cc = 0; cc++; if (cc ==10) exit(1);
+    }
+  }
+
+  void decrement(int ctx, int x) {
+    assert(ctx > 0 && ctx < 5);
+    if (ccrps[ctx].decrement(x, &rng)) {
+      if (base.decrement(x, &rng)) {
+        bp -= log(p0(x));
+      }
+    }
+  }
+
+  double bp;
+  CCRP<int> base;
+  vector<CCRP<int> > ccrps;
+
+};
+
+int main(int argc, char** argv) {
+  if (argc > 1) { verbose = true; }
+  vector<int> counts(15, 0);
+  vector<int> tcounts(15, 0);
+  int points[] = {1,2, 2,2, 3,2, 4,1, 3, 4, 3, 3, 2, 3, 4, 1, 4, 1, 3, 2, 1, 3, 1, 4, 0, 0};
+  double tlh = 0;
+  double tt = 0;
+  for (int n = 0; n < 1000; ++n) {
+    if (n % 10 == 0) cerr << '.';
+    if ((n+1) % 400 == 0) cerr << " [" << (n+1) << "]\n";
+    Model m;
+    for (int *x = points; *x; x += 2)
+      m.increment(x[0], x[1]);
+
+    for (int j = 0; j < 24; ++j) {
+      for (int *x = points; *x; x += 2) {
+        if (rng.next() < 0.8) {
+          m.decrement(x[0], x[1]);
+          m.increment_is(x[0], x[1]);
+        }
+      }
+    }
+    counts[m.base.num_customers()]++;
+    tcounts[m.base.num_tables()]++;
+    tlh += m.llh();
+    tt += 1.0;
+  }
+  cerr << "mean LLH = " << (tlh / tt) << endl;
+  for (int i = 0; i < 15; ++i)
+    cerr << i << ": " << (counts[i] / tt) << "\t" << (tcounts[i] / tt) << endl;
+}
diff --git a/gi/pf/pfbrat.cc b/gi/pf/pfbrat.cc
index c2c52760..832f22cf 100644
--- a/gi/pf/pfbrat.cc
+++ b/gi/pf/pfbrat.cc
@@ -489,7 +489,7 @@ int main(int argc, char** argv) {
     cerr << argv[0] << "Please use --model1 to specify model 1 parameters\n";
     return 1;
   }
-  shared_ptr<MT19937> prng;
+  boost::shared_ptr<MT19937> prng;
   if (conf.count("random_seed"))
     prng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
   else
diff --git a/gi/pf/pfdist.cc b/gi/pf/pfdist.cc
index 3d578db2..a3e46064 100644
--- a/gi/pf/pfdist.cc
+++ b/gi/pf/pfdist.cc
@@ -23,7 +23,7 @@ using namespace std;
 using namespace tr1;
 namespace po = boost::program_options;
 
-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;
 
 void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   po::options_description opts("Configuration options");
diff --git a/gi/pf/pfnaive.cc b/gi/pf/pfnaive.cc
index e1a53f5c..958ec4e2 100644
--- a/gi/pf/pfnaive.cc
+++ b/gi/pf/pfnaive.cc
@@ -25,7 +25,7 @@ using namespace std;
 using namespace tr1;
 namespace po = boost::program_options;
 
-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;
 
 void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   po::options_description opts("Configuration options");
diff --git a/gi/pf/poisson_uniform_word_model.h b/gi/pf/poisson_uniform_word_model.h
new file mode 100644
index 00000000..76204a0e
--- /dev/null
+++ b/gi/pf/poisson_uniform_word_model.h
@@ -0,0 +1,50 @@
+#ifndef _POISSON_UNIFORM_WORD_MODEL_H_
+#define _POISSON_UNIFORM_WORD_MODEL_H_
+
+#include <cmath>
+#include <vector>
+#include "prob.h"
+#include "m.h"
+
+// len ~ Poisson(lambda)
+//   for (1..len)
+//     e_i ~ Uniform({Vocabulary})
+struct PoissonUniformWordModel {
+  explicit PoissonUniformWordModel(const unsigned vocab_size,
+                                   const unsigned alphabet_size,
+                                   const double mean_len = 5) :
+      lh(prob_t::One()),
+      v0(-std::log(vocab_size)),
+      u0(-std::log(alphabet_size)),
+      mean_length(mean_len) {}
+
+  void ResampleHyperparameters(MT19937*) {}
+
+  inline prob_t operator()(const std::vector<WordID>& s) const {
+    prob_t p;
+    p.logeq(Md::log_poisson(s.size(), mean_length) + s.size() * u0);
+    //p.logeq(v0);
+    return p;
+  }
+
+  inline void Increment(const std::vector<WordID>& w, MT19937*) {
+    lh *= (*this)(w);
+  }
+
+  inline void Decrement(const std::vector<WordID>& w, MT19937 *) {
+    lh /= (*this)(w);
+  }
+
+  inline prob_t Likelihood() const { return lh; }
+
+  void Summary() const {}
+
+ private:
+
+  prob_t lh;  // keeps track of the draws from the base distribution
+  const double v0;    // uniform log prob of generating a word
+  const double u0;    // uniform log prob of generating a letter
+  const double mean_length;  // mean length of a word in the base distribution
+};
+
+#endif
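Spelled out, the base distribution added in poisson_uniform_word_model.h assigns a word $w$ of length $|w|$ over an alphabet of size $A$ the probability

$$p(w) \;=\; \mathrm{Poisson}(|w|;\lambda)\; A^{-|w|},$$

with $\lambda$ = `mean_length`; `operator()` computes exactly this in log space as `Md::log_poisson(s.size(), mean_length) + s.size() * u0`, where `u0 = -log(alphabet_size)`.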
diff --git a/gi/pf/pyp_lm.cc b/gi/pf/pyp_lm.cc
index 91029688..e2b67e17 100644
--- a/gi/pf/pyp_lm.cc
+++ b/gi/pf/pyp_lm.cc
@@ -25,7 +25,7 @@ using namespace std;
 using namespace tr1;
 namespace po = boost::program_options;
 
-shared_ptr<MT19937> prng;
+boost::shared_ptr<MT19937> prng;
 
 void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   po::options_description opts("Configuration options");
diff --git a/gi/pf/pyp_tm.cc b/gi/pf/pyp_tm.cc
index e21f0267..6bc8a5bf 100644
--- a/gi/pf/pyp_tm.cc
+++ b/gi/pf/pyp_tm.cc
@@ -91,26 +91,23 @@ struct ConditionalPYPWordModel {
 };
 
 PYPLexicalTranslation::PYPLexicalTranslation(const vector<vector<WordID> >& lets,
+                                             const unsigned vocab_size,
                                              const unsigned num_letters) :
     letters(lets),
-    up0(new PYPWordModel(num_letters)),
-    tmodel(new ConditionalPYPWordModel<PYPWordModel>(up0, new FreqBinner("10k.freq"))),
+    base(vocab_size, num_letters, 5),
+    tmodel(new ConditionalPYPWordModel<PoissonUniformWordModel>(&base, new FreqBinner("10k.freq"))),
     kX(-TD::Convert("X")) {}
 
 void PYPLexicalTranslation::Summary() const {
   tmodel->Summary();
-  up0->Summary();
 }
 
 prob_t PYPLexicalTranslation::Likelihood() const {
-  prob_t p = up0->Likelihood();
-  p *= tmodel->Likelihood();
-  return p;
+  return tmodel->Likelihood() * base.Likelihood();
 }
 
 void PYPLexicalTranslation::ResampleHyperparameters(MT19937* rng) {
   tmodel->ResampleHyperparameters(rng);
-  up0->ResampleHyperparameters(rng);
 }
 
 unsigned PYPLexicalTranslation::UniqueConditioningContexts() const {
diff --git a/gi/pf/pyp_tm.h b/gi/pf/pyp_tm.h
index 63e7c96d..2b076a25 100644
--- a/gi/pf/pyp_tm.h
+++ b/gi/pf/pyp_tm.h
@@ -6,13 +6,14 @@
 #include "prob.h"
 #include "sampler.h"
 #include "freqdict.h"
+#include "poisson_uniform_word_model.h"
 
 struct FreqBinner;
-struct PYPWordModel;
 template <typename T, class B> struct ConditionalPYPWordModel;
 
 struct PYPLexicalTranslation {
   explicit PYPLexicalTranslation(const std::vector<std::vector<WordID> >& lets,
+                                 const unsigned vocab_size,
                                  const unsigned num_letters);
 
   prob_t Likelihood() const;
@@ -26,8 +27,8 @@ struct PYPLexicalTranslation {
 
  private:
   const std::vector<std::vector<WordID> >& letters;  // spelling dictionary
-  PYPWordModel* up0;  // base distribuction (model English word)
-  ConditionalPYPWordModel<PYPWordModel, FreqBinner>* tmodel; // translation distributions
+  PoissonUniformWordModel base;  // "generator" of English types
+  ConditionalPYPWordModel<PoissonUniformWordModel, FreqBinner>* tmodel; // translation distributions
                   // (model English word | French word)
   const WordID kX;
 };
diff --git a/gi/pf/pyp_word_model.cc b/gi/pf/pyp_word_model.cc
deleted file mode 100644
index 12df4abf..00000000
--- a/gi/pf/pyp_word_model.cc
+++ /dev/null
@@ -1,20 +0,0 @@
-#include "pyp_word_model.h"
-
-#include <iostream>
-
-using namespace std;
-
-void PYPWordModel::ResampleHyperparameters(MT19937* rng) {
-  r.resample_hyperparameters(rng);
-  cerr << " PYPWordModel(d=" << r.discount() << ",s=" << r.strength() << ")\n";
-}
-
-void PYPWordModel::Summary() const {
-  cerr << "PYPWordModel: generations=" << r.num_customers()
-       << " PYP(d=" << r.discount() << ",s=" << r.strength() << ')' << endl;
-  for (CCRP<vector<WordID> >::const_iterator it = r.begin(); it != r.end(); ++it)
-    cerr << "   " << it->second.total_dish_count_
-         << " (on " << it->second.table_counts_.size() << " tables) "
-         << TD::GetString(it->first) << endl;
-}
-
diff --git a/gi/pf/pyp_word_model.h b/gi/pf/pyp_word_model.h
index ff366865..224a9034 100644
--- a/gi/pf/pyp_word_model.h
+++ b/gi/pf/pyp_word_model.h
@@ -11,48 +11,52 @@
 #include "os_phrase.h"
 
 // PYP(d,s,poisson-uniform) represented as a CRP
+template <class Base>
 struct PYPWordModel {
-  explicit PYPWordModel(const unsigned vocab_e_size, const double mean_len = 5) :
-      base(prob_t::One()), r(1,1,1,1,0.66,50.0), u0(-std::log(vocab_e_size)), mean_length(mean_len) {}
-
-  void ResampleHyperparameters(MT19937* rng);
+  explicit PYPWordModel(Base* b) :
+      base(*b),
+      r(1,1,1,1,0.66,50.0)
+      {}
+
+  void ResampleHyperparameters(MT19937* rng) {
+    r.resample_hyperparameters(rng);
+    std::cerr << " PYPWordModel(d=" << r.discount() << ",s=" << r.strength() << ")\n";
+  }
 
   inline prob_t operator()(const std::vector<WordID>& s) const {
-    return r.prob(s, p0(s));
+    return r.prob(s, base(s));
   }
 
   inline void Increment(const std::vector<WordID>& s, MT19937* rng) {
-    if (r.increment(s, p0(s), rng))
-      base *= p0(s);
+    if (r.increment(s, base(s), rng))
+      base.Increment(s, rng);
   }
 
   inline void Decrement(const std::vector<WordID>& s, MT19937 *rng) {
     if (r.decrement(s, rng))
-      base /= p0(s);
+      base.Decrement(s, rng);
   }
 
   inline prob_t Likelihood() const {
     prob_t p; p.logeq(r.log_crp_prob());
-    p *= base;
+    p *= base.Likelihood();
     return p;
   }
 
-  void Summary() const;
-
- private:
-  inline double logp0(const std::vector<WordID>& s) const {
-    return Md::log_poisson(s.size(), mean_length) + s.size() * u0;
+  void Summary() const {
+    std::cerr << "PYPWordModel: generations=" << r.num_customers()
+              << " PYP(d=" << r.discount() << ",s=" << r.strength() << ')' << std::endl;
+    for (typename CCRP<std::vector<WordID> >::const_iterator it = r.begin(); it != r.end(); ++it) {
+      std::cerr << "   " << it->second.total_dish_count_
+                << " (on " << it->second.table_counts_.size() << " tables) "
+                << TD::GetString(it->first) << std::endl;
+    }
   }
 
-  inline prob_t p0(const std::vector<WordID>& s) const {
-    prob_t p; p.logeq(logp0(s));
-    return p;
-  }
+ private:
 
-  prob_t base;  // keeps track of the draws from the base distribution
+  Base& base;  // keeps track of the draws from the base distribution
   CCRP<std::vector<WordID> > r;
-  const double u0;  // uniform log prob of generating a letter
-  const double mean_length;  // mean length of a word in the base distribution
 };
 
 #endif
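The templated PYPWordModel above is the usual Pitman-Yor CRP wrapper: `r.prob(s, base(s))` is the standard PYP predictive probability. Stated here for orientation (the formula is textbook PYP, not taken from the patch): with $c_s$ customers and $t_s$ tables for dish $s$, totals $c$ and $T$, discount $d$, and strength $\theta$,

$$p(s) \;=\; \frac{c_s - d\,t_s + (\theta + d\,T)\,P_0(s)}{c + \theta},$$

where $P_0$ is now supplied by the Base template parameter instead of the previously hard-coded Poisson-uniform term.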
diff --git a/gi/pf/quasi_model2.h b/gi/pf/quasi_model2.h
index 588c8f84..4075affe 100644
--- a/gi/pf/quasi_model2.h
+++ b/gi/pf/quasi_model2.h
@@ -9,6 +9,7 @@
 #include "array2d.h"
 #include "slice_sampler.h"
 #include "m.h"
+#include "have_64_bits.h"
 
 struct AlignmentObservation {
   AlignmentObservation() : src_len(), trg_len(), j(), a_j() {}
@@ -20,13 +21,23 @@ struct AlignmentObservation {
   unsigned short a_j;
 };
 
+#ifdef HAVE_64_BITS
 inline size_t hash_value(const AlignmentObservation& o) {
   return reinterpret_cast<const size_t&>(o);
 }
-
 inline bool operator==(const AlignmentObservation& a, const AlignmentObservation& b) {
   return hash_value(a) == hash_value(b);
 }
+#else
+inline size_t hash_value(const AlignmentObservation& o) {
+  size_t h = 1;
+  boost::hash_combine(h, o.src_len);
+  boost::hash_combine(h, o.trg_len);
+  boost::hash_combine(h, o.j);
+  boost::hash_combine(h, o.a_j);
+  return h;
+}
+#endif
 
 struct QuasiModel2 {
   explicit QuasiModel2(double alpha, double pnull = 0.1) :
diff --git a/gi/pf/tied_resampler.h b/gi/pf/tied_resampler.h
index 6f45fbce..a4f4af36 100644
--- a/gi/pf/tied_resampler.h
+++ b/gi/pf/tied_resampler.h
@@ -78,10 +78,8 @@ struct TiedResampler {
                                   std::numeric_limits<double>::infinity(), 0.0, niterations, 100*niterations);
     std::cerr << "TiedCRPs(d=" << discount << ",s=" << strength << ") = " << LogLikelihood(discount, strength) << std::endl;
-    for (typename std::set<CRP*>::iterator it = crps.begin(); it != crps.end(); ++it) {
-      (*it)->set_discount(discount);
-      (*it)->set_strength(strength);
-    }
+    for (typename std::set<CRP*>::iterator it = crps.begin(); it != crps.end(); ++it)
+      (*it)->set_hyperparameters(discount, strength);
   }
 
  private:
  std::set<CRP*> crps;