// Online MIRA training: tunes decoder feature weights against an MT
// evaluation metric on a development set.
#include <sstream>
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <cassert>
#include <cmath>
#include <stdint.h>

#include "config.h"

#include <boost/shared_ptr.hpp>
#include <boost/program_options.hpp>
#include <boost/program_options/variables_map.hpp>

#include "sentence_metadata.h"
#include "scorer.h"
#include "verbose.h"
#include "viterbi.h"
#include "hg.h"
#include "prob.h"
#include "kbest.h"
#include "ff_register.h"
#include "decoder.h"
#include "filelib.h"
#include "fdict.h"
#include "weights.h"
#include "sparse_vector.h"
#include "sampler.h"

using namespace std;
using boost::shared_ptr;
namespace po = boost::program_options;

bool invert_score;
boost::shared_ptr<MT19937> rng;

// Abort if any weight is NaN or infinite.
void SanityCheck(const vector<double>& w) {
  for (int i = 0; i < w.size(); ++i) {
    assert(!isnan(w[i]));
    assert(!isinf(w[i]));
  }
}

// Orders feature ids by descending absolute weight.
struct FComp {
  const vector<double>& w_;
  FComp(const vector<double>& w) : w_(w) {}
  bool operator()(int a, int b) const {
    return fabs(w_[a]) > fabs(w_[b]);
  }
};

// Fisher-Yates shuffle of the ids 0..len-1.
void RandomPermutation(int len, vector<int>* p_ids) {
  vector<int>& ids = *p_ids;
  ids.resize(len);
  for (int i = 0; i < len; ++i) ids[i] = i;
  for (int i = len; i > 0; --i) {
    int j = rng->next() * i;          // rng->next() is in [0,1)
    if (j == i) --j;                  // guard in case next() ever returns 1.0
    swap(ids[i-1], ids[j]);
  }
}

// Prints the (up to) ten features with the largest absolute weights.
void ShowLargestFeatures(const vector<double>& w) {
  vector<int> fnums(w.size());
  for (int i = 0; i < w.size(); ++i)
    fnums[i] = i;
  vector<int>::iterator mid = fnums.begin();
  mid += (w.size() > 10 ? 10 : w.size());
  partial_sort(fnums.begin(), mid, fnums.end(), FComp(w));
  cerr << "TOP FEATURES:";
  for (vector<int>::iterator i = fnums.begin(); i != mid; ++i) {
    cerr << ' ' << FD::Convert(*i) << '=' << w[*i];
  }
  cerr << endl;
}

bool InitCommandLine(int argc, char** argv, po::variables_map* conf) {
  po::options_description opts("Configuration options");
  opts.add_options()
        ("input_weights,w",po::value<string>(),"Input feature weights file")
        ("source,i",po::value<string>(),"Source file for development set")
        ("passes,p", po::value<int>()->default_value(15), "Number of passes through the training data")
        ("reference,r",po::value<vector<string> >(), "[REQD] Reference translation(s) (tokenized text file)")
        ("mt_metric,m",po::value<string>()->default_value("ibm_bleu"), "Scoring metric (ibm_bleu, nist_bleu, koehn_bleu, ter, combi)")
        ("max_step_size,C", po::value<double>()->default_value(0.01), "regularization strength (C)")
        ("mt_metric_scale,s", po::value<double>()->default_value(1.0), "Amount to scale MT loss function by")
        ("k_best_size,k", po::value<int>()->default_value(250), "Size of hypothesis list to search for oracles")
        ("random_seed,S", po::value<uint32_t>(), "Random seed (if not specified, /dev/random will be used)")
        ("decoder_config,c",po::value<string>(),"Decoder configuration file");
  po::options_description clo("Command line options");
  clo.add_options()
        ("config", po::value<string>(), "Configuration file")
        ("help,h", "Print this help message and exit");
  po::options_description dconfig_options, dcmdline_options;
  dconfig_options.add(opts);
  dcmdline_options.add(opts).add(clo);

  po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
  if (conf->count("config")) {
    ifstream config((*conf)["config"].as<string>().c_str());
    po::store(po::parse_config_file(config, dconfig_options), *conf);
  }
  po::notify(*conf);

  if (conf->count("help") || !conf->count("input_weights") || !conf->count("source")
      || !conf->count("decoder_config") || !conf->count("reference")) {
    cerr << dcmdline_options << endl;
    return false;
  }
  return true;
}

static const double kMINUS_EPSILON = -1e-6;

struct HypothesisInfo {
  SparseVector<double> features;
  double mt_metric;
};

struct GoodBadOracle {
  shared_ptr<HypothesisInfo> good;
  shared_ptr<HypothesisInfo> bad;
};
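// The observer below hooks into the decoder and, for each sentence, scans the
// k-best list to track three hypotheses: the model-best derivation (cur_best),
// the derivation with the highest metric score (good, often called the "hope"
// oracle in the MIRA literature), and the derivation with the lowest metric
// score (bad, the "fear" oracle). The good/bad pair supplies the margin for
// the weight update in main().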
struct TrainingObserver : public DecoderObserver {
  TrainingObserver(const int k, const DocScorer& d, vector<GoodBadOracle>* o) : ds(d), oracles(*o), kbest_size(k) {}
  const DocScorer& ds;
  vector<GoodBadOracle>& oracles;
  shared_ptr<HypothesisInfo> cur_best;
  const int kbest_size;

  const HypothesisInfo& GetCurrentBestHypothesis() const {
    return *cur_best;
  }

  virtual void NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) {
    UpdateOracles(smeta.GetSentenceID(), *hg);
  }

  shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score) {
    shared_ptr<HypothesisInfo> h(new HypothesisInfo);
    h->features = feats;
    h->mt_metric = score;
    return h;
  }

  void UpdateOracles(int sent_id, const Hypergraph& forest) {
    shared_ptr<HypothesisInfo>& cur_good = oracles[sent_id].good;
    shared_ptr<HypothesisInfo>& cur_bad = oracles[sent_id].bad;
    cur_bad.reset();  // TODO get rid of??
    KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> kbest(forest, kbest_size);
    for (int i = 0; i < kbest_size; ++i) {
      const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal>::Derivation* d =
        kbest.LazyKthBest(forest.nodes_.size() - 1, i);
      if (!d) break;
      float sentscore = ds[sent_id]->ScoreCandidate(d->yield)->ComputeScore();
      if (invert_score) sentscore *= -1.0;
      // cerr << TD::GetString(d->yield) << " ||| " << d->score << " ||| " << sentscore << endl;
      if (i == 0)
        cur_best = MakeHypothesisInfo(d->feature_values, sentscore);
      if (!cur_good || sentscore > cur_good->mt_metric)
        cur_good = MakeHypothesisInfo(d->feature_values, sentscore);
      if (!cur_bad || sentscore < cur_bad->mt_metric)
        cur_bad = MakeHypothesisInfo(d->feature_values, sentscore);
    }
    //cerr << "GOOD: " << cur_good->mt_metric << endl;
    //cerr << " CUR: " << cur_best->mt_metric << endl;
    //cerr << " BAD: " << cur_bad->mt_metric << endl;
  }
};

// Reads one source sentence per line into *c.
void ReadTrainingCorpus(const string& fname, vector<string>* c) {
  ReadFile rf(fname);
  istream& in = *rf.stream();
  string line;
  while(in) {
    getline(in, line);
    if (!in) break;
    c->push_back(line);
  }
}

// Relative-tolerance comparison, used to detect when the model-best
// hypothesis already matches the oracle's metric score.
bool ApproxEqual(double a, double b) {
  if (a == b) return true;
  return (fabs(a-b)/fabs(b)) < 0.000001;
}
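// Training loop: each pass visits the development sentences in a fresh random
// order. After decoding sentence i, a 1-best MIRA-style update is applied:
// with hope oracle g and fear oracle b from the current k-best list, the
// margin-augmented loss is
//
//   loss = w.f(b) - w.f(g) + mt_metric_scale * (metric(g) - metric(b))
//
// and, when loss > 0, the weights move along f(g) - f(b) with step size
// min(max_step_size, loss / ||f(g) - f(b)||^2). The weight vector after every
// sentence is also accumulated into `tot` so that an averaged model can be
// written out alongside the final one.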
int main(int argc, char** argv) {
  register_feature_functions();
  SetSilent(true);  // turn off verbose decoder output
  po::variables_map conf;
  if (!InitCommandLine(argc, argv, &conf)) return 1;

  if (conf.count("random_seed"))
    rng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
  else
    rng.reset(new MT19937);
  vector<string> corpus;
  ReadTrainingCorpus(conf["source"].as<string>(), &corpus);
  const string metric_name = conf["mt_metric"].as<string>();
  ScoreType type = ScoreTypeFromString(metric_name);
  // TER is an error rate (lower is better), so negate it to get a score.
  if (type == TER) {
    invert_score = true;
  } else {
    invert_score = false;
  }
  DocScorer ds(type, conf["reference"].as<vector<string> >(), "");
  cerr << "Loaded " << ds.size() << " references for scoring with " << metric_name << endl;
  if (ds.size() != corpus.size()) {
    cerr << "Mismatched number of references (" << ds.size() << ") and sources (" << corpus.size() << ")\n";
    return 1;
  }
  // load initial weights
  Weights weights;
  weights.InitFromFile(conf["input_weights"].as<string>());
  SparseVector<double> lambdas;
  weights.InitSparseVector(&lambdas);
  ReadFile ini_rf(conf["decoder_config"].as<string>());
  Decoder decoder(ini_rf.stream());
  const double max_step_size = conf["max_step_size"].as<double>();
  const double mt_metric_scale = conf["mt_metric_scale"].as<double>();

  assert(corpus.size() > 0);
  vector<GoodBadOracle> oracles(corpus.size());

  TrainingObserver observer(conf["k_best_size"].as<int>(), ds, &oracles);
  int cur_sent = 0;
  int lcount = 0;
  int normalizer = 0;
  double tot_loss = 0;
  int dots = 0;
  int cur_pass = 0;
  vector<double> dense_weights;
  SparseVector<double> tot;
  tot += lambdas;          // initial weights
  normalizer++;            // count for initial weights
  int max_iteration = conf["passes"].as<int>() * corpus.size();
  string msg = "# MIRA tuned weights";
  string msga = "# MIRA tuned weights AVERAGED";
  vector<int> order;
  RandomPermutation(corpus.size(), &order);
  while (lcount <= max_iteration) {
    dense_weights.clear();
    weights.InitFromVector(lambdas);
    weights.InitVector(&dense_weights);
    decoder.SetWeights(dense_weights);
    // progress dots: 40 per pass
    if ((cur_sent * 40 / corpus.size()) > dots) { ++dots; cerr << '.'; }
    if (corpus.size() == cur_sent) {
      // end of a pass: report, dump current and averaged weights, reshuffle
      cerr << " [AVG METRIC LAST PASS=" << (tot_loss / corpus.size()) << "]\n";
      ShowLargestFeatures(dense_weights);
      cur_sent = 0;
      tot_loss = 0;
      dots = 0;
      ostringstream os;
      os << "weights.mira-pass" << (cur_pass < 10 ? "0" : "") << cur_pass << ".gz";
      weights.WriteToFile(os.str(), true, &msg);
      SparseVector<double> x = tot;
      x /= normalizer;
      ostringstream sa;
      sa << "weights.mira-pass" << (cur_pass < 10 ? "0" : "") << cur_pass << "-avg.gz";
      Weights ww;
      ww.InitFromVector(x);
      ww.WriteToFile(sa.str(), true, &msga);
      ++cur_pass;
      RandomPermutation(corpus.size(), &order);
    }
    if (cur_sent == 0) { cerr << "PASS " << (lcount / corpus.size() + 1) << endl; }
    decoder.SetId(order[cur_sent]);
    decoder.Decode(corpus[order[cur_sent]], &observer);  // update oracles

    const HypothesisInfo& cur_hyp = observer.GetCurrentBestHypothesis();
    const HypothesisInfo& cur_good = *oracles[order[cur_sent]].good;
    const HypothesisInfo& cur_bad = *oracles[order[cur_sent]].bad;
    tot_loss += cur_hyp.mt_metric;
    if (!ApproxEqual(cur_hyp.mt_metric, cur_good.mt_metric)) {
      const double loss = cur_bad.features.dot(dense_weights) - cur_good.features.dot(dense_weights) +
          mt_metric_scale * (cur_good.mt_metric - cur_bad.mt_metric);
      //cerr << "LOSS: " << loss << endl;
      if (loss > 0.0) {
        SparseVector<double> diff = cur_good.features;
        diff -= cur_bad.features;
        double step_size = loss / diff.l2norm_sq();
        //cerr << loss << " " << step_size << " " << diff << endl;
        if (step_size > max_step_size) step_size = max_step_size;
        lambdas += (cur_good.features * step_size);
        lambdas -= (cur_bad.features * step_size);
        //cerr << "L: " << lambdas << endl;
      }
    }
    tot += lambdas;
    ++normalizer;
    ++lcount;
    ++cur_sent;
  }
  cerr << endl;
  weights.WriteToFile("weights.mira-final.gz", true, &msg);
  tot /= normalizer;
  weights.InitFromVector(tot);
  msg = "# MIRA tuned weights (averaged vector)";
  weights.WriteToFile("weights.mira-final-avg.gz", true, &msg);
  cerr << "Optimization complete.\nAVERAGED WEIGHTS: weights.mira-final-avg.gz\n";
  return 0;
}
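// Example invocation (a sketch; the binary and data file names are
// illustrative, not part of this source):
//
//   ./kbest_mira -c cdec.ini -w initial.weights -i dev.src \
//                -r dev.ref0 -r dev.ref1 -m ibm_bleu -p 20
//
// This would make 20 passes over dev.src, score hypotheses against the two
// references with IBM BLEU, and write per-pass weights plus the final and
// averaged models (weights.mira-final.gz, weights.mira-final-avg.gz).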