| field | value | date |
|---|---|---|
| author | Jonathan Clark <jon.h.clark@gmail.com> | 2011-05-03 12:13:39 -0400 |
| committer | Jonathan Clark <jon.h.clark@gmail.com> | 2011-05-03 12:13:39 -0400 |
| commit | 3dec805a31b4542eee07adc6d2f42f77f7980045 (patch) | |
| tree | 7d29e2b5f9e9a00953d1b6771816607c6d2de071 | |
| parent | 148c0d57317e097f94a1562452bc50cdc23408dc (diff) | |
| parent | c214f83e941b10daa47dc9a6f0352dde42dbadb4 (diff) | |
Merge branch 'master' of github.com:redpony/cdec
Diffstat:

| mode | path | changed |
|---|---|---|
| -rw-r--r-- | decoder/decoder.cc | 5 |
| -rw-r--r-- | decoder/ff.h | 3 |
| -rw-r--r-- | mira/Makefile.am | 6 |
| -rw-r--r-- | mira/kbest_mira.cc | 237 |
| -rw-r--r-- | utils/sparse_vector.h | 10 |

5 files changed, 255 insertions(+), 6 deletions(-)
```diff
diff --git a/decoder/decoder.cc b/decoder/decoder.cc
index 0d7e84ad..d8dd0c61 100644
--- a/decoder/decoder.cc
+++ b/decoder/decoder.cc
@@ -176,8 +176,11 @@ struct DecoderImpl {
   bool Decode(const string& input, DecoderObserver*);
   void SetWeights(const vector<double>& weights) {
     init_weights = weights;
-    for (int i = 0; i < rescoring_passes.size(); ++i)
+    for (int i = 0; i < rescoring_passes.size(); ++i) {
+      if (rescoring_passes[i].models)
+        rescoring_passes[i].models->SetWeights(weights);
       rescoring_passes[i].weight_vector = weights;
+    }
   }
   void SetId(int next_sent_id) { sent_id = next_sent_id - 1; }
diff --git a/decoder/ff.h b/decoder/ff.h
index 89b8b067..c3aa9b6d 100644
--- a/decoder/ff.h
+++ b/decoder/ff.h
@@ -264,6 +264,9 @@ class ModelSet {
   ModelSet(const std::vector<double>& weights,
            const std::vector<const FeatureFunction*>& models);
 
+  // TODO stop stupid copy
+  void SetWeights(const std::vector<double>& w) { weights_ = w; }
+
   // sets edge->feature_values_ and edge->edge_prob_
   // NOTE: edge must not necessarily be in hg.edges_ but its TAIL nodes
   // must be. edge features are supposed to be overwritten, not added to (possibly because rule features aren't in ModelSet so need to be left alone
diff --git a/mira/Makefile.am b/mira/Makefile.am
new file mode 100644
index 00000000..7b4a4e12
--- /dev/null
+++ b/mira/Makefile.am
@@ -0,0 +1,6 @@
+bin_PROGRAMS = kbest_mira
+
+kbest_mira_SOURCES = kbest_mira.cc
+kbest_mira_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
+
+AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval
diff --git a/mira/kbest_mira.cc b/mira/kbest_mira.cc
new file mode 100644
index 00000000..7ff207a8
--- /dev/null
+++ b/mira/kbest_mira.cc
@@ -0,0 +1,237 @@
+#include <sstream>
+#include <iostream>
+#include <vector>
+#include <cassert>
+#include <cmath>
+
+#include "config.h"
+
+#include <boost/shared_ptr.hpp>
+#include <boost/program_options.hpp>
+#include <boost/program_options/variables_map.hpp>
+
+#include "sentence_metadata.h"
+#include "scorer.h"
+#include "verbose.h"
+#include "viterbi.h"
+#include "hg.h"
+#include "prob.h"
+#include "kbest.h"
+#include "ff_register.h"
+#include "decoder.h"
+#include "filelib.h"
+#include "fdict.h"
+#include "weights.h"
+#include "sparse_vector.h"
+
+using namespace std;
+using boost::shared_ptr;
+namespace po = boost::program_options;
+
+void SanityCheck(const vector<double>& w) {
+  for (int i = 0; i < w.size(); ++i) {
+    assert(!isnan(w[i]));
+    assert(!isinf(w[i]));
+  }
+}
+
+struct FComp {
+  const vector<double>& w_;
+  FComp(const vector<double>& w) : w_(w) {}
+  bool operator()(int a, int b) const {
+    return fabs(w_[a]) > fabs(w_[b]);
+  }
+};
+
+void ShowLargestFeatures(const vector<double>& w) {
+  vector<int> fnums(w.size());
+  for (int i = 0; i < w.size(); ++i)
+    fnums[i] = i;
+  vector<int>::iterator mid = fnums.begin();
+  mid += (w.size() > 10 ? 10 : w.size());
+  partial_sort(fnums.begin(), mid, fnums.end(), FComp(w));
+  cerr << "TOP FEATURES:";
+  for (vector<int>::iterator i = fnums.begin(); i != mid; ++i) {
+    cerr << ' ' << FD::Convert(*i) << '=' << w[*i];
+  }
+  cerr << endl;
+}
+
+bool InitCommandLine(int argc, char** argv, po::variables_map* conf) {
+  po::options_description opts("Configuration options");
+  opts.add_options()
+        ("input_weights,w",po::value<string>(),"Input feature weights file")
+        ("source,i",po::value<string>(),"Source file for development set")
+        ("reference,r",po::value<vector<string> >(), "[REQD] Reference translation(s) (tokenized text file)")
+        ("mt_metric,m",po::value<string>()->default_value("ter"), "Scoring metric (ibm_bleu, nist_bleu, koehn_bleu, ter, combi)")
+        ("max_step_size,C", po::value<double>()->default_value(0.0001), "maximum step size (C)")
+        ("mt_metric_scale,s", po::value<double>()->default_value(1.0), "Amount to scale MT loss function by")
+        ("decoder_config,c",po::value<string>(),"Decoder configuration file");
+  po::options_description clo("Command line options");
+  clo.add_options()
+        ("config", po::value<string>(), "Configuration file")
+        ("help,h", "Print this help message and exit");
+  po::options_description dconfig_options, dcmdline_options;
+  dconfig_options.add(opts);
+  dcmdline_options.add(opts).add(clo);
+
+  po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
+  if (conf->count("config")) {
+    ifstream config((*conf)["config"].as<string>().c_str());
+    po::store(po::parse_config_file(config, dconfig_options), *conf);
+  }
+  po::notify(*conf);
+
+  if (conf->count("help") || !conf->count("input_weights") || !conf->count("source") || !conf->count("decoder_config") || !conf->count("reference")) {
+    cerr << dcmdline_options << endl;
+    return false;
+  }
+  return true;
+}
+
+static const double kMINUS_EPSILON = -1e-6;
+
+struct HypothesisInfo {
+  SparseVector<double> features;
+  double mt_metric;
+};
+
+struct GoodBadOracle {
+  shared_ptr<HypothesisInfo> good;
+  shared_ptr<HypothesisInfo> bad;
+};
+
+struct TrainingObserver : public DecoderObserver {
+  TrainingObserver(const DocScorer& d, vector<GoodBadOracle>* o) : ds(d), oracles(*o) {}
+  const DocScorer& ds;
+  vector<GoodBadOracle>& oracles;
+  shared_ptr<HypothesisInfo> cur_best;
+
+  const HypothesisInfo& GetCurrentBestHypothesis() const {
+    return *cur_best;
+  }
+
+  virtual void NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) {
+    UpdateOracles(smeta.GetSentenceID(), *hg);
+  }
+
+  shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score) {
+    shared_ptr<HypothesisInfo> h(new HypothesisInfo);
+    h->features = feats;
+    h->mt_metric = score;
+    return h;
+  }
+
+  void UpdateOracles(int sent_id, const Hypergraph& forest) {
+    int kbest_size = 330;
+    shared_ptr<HypothesisInfo>& cur_good = oracles[sent_id].good;
+    shared_ptr<HypothesisInfo>& cur_bad = oracles[sent_id].bad;
+    cur_bad.reset();  // TODO get rid of??
+    KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> kbest(forest, kbest_size);
+    for (int i = 0; i < kbest_size; ++i) {
+      const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal>::Derivation* d =
+        kbest.LazyKthBest(forest.nodes_.size() - 1, i);
+      if (!d) break;
+      float sentscore = ds[sent_id]->ScoreCandidate(d->yield)->ComputeScore();
+//      cerr << TD::GetString(d->yield) << " ||| " << d->score << " ||| " << sentscore << endl;
+      if (i == 0)
+        cur_best = MakeHypothesisInfo(d->feature_values, sentscore);
+      if (!cur_good || sentscore < cur_good->mt_metric)
+        cur_good = MakeHypothesisInfo(d->feature_values, sentscore);
+      if (!cur_bad || sentscore > cur_bad->mt_metric)
+        cur_bad = MakeHypothesisInfo(d->feature_values, sentscore);
+    }
+    cerr << "GOOD: " << cur_good->mt_metric << endl;
+    cerr << " BAD: " << cur_bad->mt_metric << endl;
+    cerr << "  #1: " << cur_best->mt_metric << endl;
+  }
+};
+
+void ReadTrainingCorpus(const string& fname, vector<string>* c) {
+  ReadFile rf(fname);
+  istream& in = *rf.stream();
+  string line;
+  while(in) {
+    getline(in, line);
+    if (!in) break;
+    c->push_back(line);
+  }
+}
+
+bool ApproxEqual(double a, double b) {
+  if (a == b) return true;
+  return (fabs(a-b)/fabs(b)) < 0.000001;
+}
+
+int main(int argc, char** argv) {
+  register_feature_functions();
+  //SetSilent(true);  // turn off verbose decoder output
+
+  po::variables_map conf;
+  if (!InitCommandLine(argc, argv, &conf)) return 1;
+
+  vector<string> corpus;
+  ReadTrainingCorpus(conf["source"].as<string>(), &corpus);
+  const string metric_name = conf["mt_metric"].as<string>();
+  ScoreType type = ScoreTypeFromString(metric_name);
+  DocScorer ds(type, conf["reference"].as<vector<string> >(), "");
+  cerr << "Loaded " << ds.size() << " references for scoring with " << metric_name << endl;
+  if (ds.size() != corpus.size()) {
+    cerr << "Mismatched number of references (" << ds.size() << ") and sources (" << corpus.size() << ")\n";
+    return 1;
+  }
+  // load initial weights
+  Weights weights;
+  weights.InitFromFile(conf["input_weights"].as<string>());
+  SparseVector<double> lambdas;
+  weights.InitSparseVector(&lambdas);
+
+  // freeze feature set (should be optional?)
+  const bool freeze_feature_set = true;
+  if (freeze_feature_set) FD::Freeze();
+
+  ReadFile ini_rf(conf["decoder_config"].as<string>());
+  Decoder decoder(ini_rf.stream());
+  const double max_step_size = conf["max_step_size"].as<double>();
+  const double mt_metric_scale = conf["mt_metric_scale"].as<double>();
+
+  assert(corpus.size() > 0);
+  vector<GoodBadOracle> oracles(corpus.size());
+
+  TrainingObserver observer(ds, &oracles);
+  int cur_sent = 0;
+  bool converged = false;
+  vector<double> dense_weights;
+  while (!converged) {
+    dense_weights.clear();
+    weights.InitFromVector(lambdas);
+    weights.InitVector(&dense_weights);
+    decoder.SetWeights(dense_weights);
+    if (corpus.size() == cur_sent) cur_sent = 0;
+    decoder.SetId(cur_sent);
+    decoder.Decode(corpus[cur_sent], &observer);  // update oracles
+    const HypothesisInfo& cur_hyp = observer.GetCurrentBestHypothesis();
+    const HypothesisInfo& cur_good = *oracles[cur_sent].good;
+    const HypothesisInfo& cur_bad = *oracles[cur_sent].bad;
+    if (!ApproxEqual(cur_hyp.mt_metric, cur_good.mt_metric)) {
+      const double loss = cur_bad.features.dot(dense_weights) - cur_good.features.dot(dense_weights) +
+          mt_metric_scale * (cur_good.mt_metric - cur_bad.mt_metric);
+      cerr << "LOSS: " << loss << endl;
+      if (loss > 0.0) {
+        SparseVector<double> diff = cur_good.features;
+        diff -= cur_bad.features;
+        double step_size = loss / diff.l2norm_sq();
+        //cerr << loss << " " << step_size << " " << diff << endl;
+        if (step_size > max_step_size) step_size = max_step_size;
+        lambdas += (cur_good.features * step_size);
+        lambdas -= (cur_bad.features * step_size);
+        //cerr << "L: " << lambdas << endl;
+      }
+    }
+    ++cur_sent;
+    static int cc = 0; ++cc; if (cc==250) converged = true;
+  }
+  weights.WriteToFile("-");
+  return 0;
+}
diff --git a/utils/sparse_vector.h b/utils/sparse_vector.h
index 274220ef..e3721b50 100644
--- a/utils/sparse_vector.h
+++ b/utils/sparse_vector.h
@@ -54,16 +54,16 @@ SparseVector<T> operator*(const SparseVector<T>& a, const T& b) {
   return result *= b;
 }
 
-template <class T>
-SparseVector<T> operator/(const SparseVector<T>& a, const double& b) {
+template <class T, typename S>
+SparseVector<T> operator/(const SparseVector<T>& a, const S& b) {
   SparseVector<T> result = a;
-  return result *= b;
+  return result /= b;
 }
 
 template <class T>
-SparseVector<T> operator/(const SparseVector<T>& a, const T& b) {
+SparseVector<T> operator/(const SparseVector<T>& a, const double& b) {
   SparseVector<T> result = a;
-  return result *= b;
+  return result /= b;
 }
 
 #include "fdict.h"
```
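For orientation, the new `mira/kbest_mira.cc` trainer performs a single-pair online MIRA step per sentence: `UpdateOracles` scans a 330-best list to track the model-best hypothesis plus an oracle ("good") and a "fear" ("bad") hypothesis, and the main loop then takes a loss-proportional update clipped at `max_step_size`. A sketch of that update only, with informal symbols standing in for the code's variables ($w$ = `dense_weights`, $f(\cdot)$ = `features`, $m(\cdot)$ = `mt_metric`, $s$ = `mt_metric_scale`, $C$ = `max_step_size`; with the default `ter` metric, lower $m$ is better, so "good" is the lowest-scoring k-best entry):

```latex
\begin{align*}
\ell &= w^\top f(\mathrm{bad}) - w^\top f(\mathrm{good})
        + s\,\bigl(m(\mathrm{good}) - m(\mathrm{bad})\bigr) \\
\eta &= \min\!\left(C,\ \frac{\ell}{\lVert f(\mathrm{good}) - f(\mathrm{bad})\rVert_2^{2}}\right) \\
w &\leftarrow w + \eta\,\bigl(f(\mathrm{good}) - f(\mathrm{bad})\bigr)
\end{align*}
```

The update is applied only when $\ell > 0$ and the 1-best hypothesis is not already (approximately) the oracle. Relatedly, the `utils/sparse_vector.h` hunk fixes a copy-paste bug: both `operator/` overloads previously reused `operator*`'s body and multiplied (`result *= b`) where they should divide (`result /= b`), and the first overload is generalized to an arbitrary scalar type `S`.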