From d3fa97575ddfe1a91b13a407ade5f86bd305b2e0 Mon Sep 17 00:00:00 2001
From: Avneesh
Date: Fri, 28 Sep 2012 11:09:33 -0700
Subject: adding latent SSVM code, modified Makefile.am and configure.ac files

---
 latent_svm/Makefile.am   |   6 +
 latent_svm/latent_svm.cc | 412 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 418 insertions(+)
 create mode 100644 latent_svm/Makefile.am
 create mode 100644 latent_svm/latent_svm.cc

diff --git a/latent_svm/Makefile.am b/latent_svm/Makefile.am
new file mode 100644
index 00000000..673b9159
--- /dev/null
+++ b/latent_svm/Makefile.am
@@ -0,0 +1,6 @@
+bin_PROGRAMS = latent_svm
+
+latent_svm_SOURCES = latent_svm.cc
+latent_svm_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
+
+AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval
diff --git a/latent_svm/latent_svm.cc b/latent_svm/latent_svm.cc
new file mode 100644
index 00000000..ab9c1d5d
--- /dev/null
+++ b/latent_svm/latent_svm.cc
@@ -0,0 +1,412 @@
+/*
+Points to note regarding variable names:
+total_loss and prev_loss actually refer not to loss, but the metric (usually BLEU)
+*/
+#include <sstream>
+#include <iostream>
+#include <vector>
+#include <cassert>
+#include <cmath>
+
+//boost libraries
+#include <boost/shared_ptr.hpp>
+#include <boost/program_options.hpp>
+#include <boost/program_options/variables_map.hpp>
+
+//cdec libraries
+#include "config.h"
+#include "hg_sampler.h"
+#include "sentence_metadata.h"
+#include "scorer.h"
+#include "verbose.h"
+#include "viterbi.h"
+#include "hg.h"
+#include "prob.h"
+#include "kbest.h"
+#include "ff_register.h"
+#include "decoder.h"
+#include "filelib.h"
+#include "fdict.h"
+#include "weights.h"
+#include "sparse_vector.h"
+#include "sampler.h"
+
+using namespace std;
+using boost::shared_ptr;
+namespace po = boost::program_options;
+
+bool invert_score;
+boost::shared_ptr<MT19937> rng; //random seed ptr
+
+void RandomPermutation(int len, vector<int>* p_ids) {
+  vector<int>& ids = *p_ids;
+  ids.resize(len);
+  for (int i = 0; i < len; ++i) ids[i] = i;
+  for (int i = len; i > 0; --i) {
+    int j = rng->next() * i;
+    if (j == i) i--;
+    swap(ids[i-1], ids[j]);
+  }
+}
+
+bool InitCommandLine(int argc, char** argv, po::variables_map* conf) {
+  po::options_description opts("Configuration options");
+  opts.add_options()
+    ("weights,w",po::value<string>(),"[REQD] Input feature weights file")
+    ("input,i",po::value<string>(),"[REQD] Input source file for development set")
+    ("passes,p", po::value<int>()->default_value(15), "Number of passes through the training data")
+    ("weights_write_interval,n", po::value<int>()->default_value(1000), "Number of lines between writing out weights")
+    ("reference,r",po::value<vector<string> >(), "[REQD] Reference translation(s) (tokenized text file)")
+    ("mt_metric,m",po::value<string>()->default_value("ibm_bleu"), "Scoring metric (ibm_bleu, nist_bleu, koehn_bleu, ter, combi)")
+    ("regularizer_strength,C", po::value<double>()->default_value(0.01), "regularization strength")
+    ("mt_metric_scale,s", po::value<double>()->default_value(1.0), "Cost function is -mt_metric_scale*BLEU")
+    ("costaug_log_bleu,l", "Flag converts BLEU to log space. Cost function is thus -mt_metric_scale*log(BLEU). Not on by default")
+    ("average,A", "Average the weights (this is a weighted average due to the scaling factor)")
+    ("mu,u", po::value<double>()->default_value(0.0), "weight (between 0 and 1) to scale model score by for oracle selection")
+    ("stepsize_param,a", po::value<double>()->default_value(0.01), "Stepsize parameter, during optimization")
+    ("stepsize_reduce,t", "Divide step size by sqrt(number of examples seen so far), as per Ratliff et al., 2007")
+    ("metric_threshold,T", po::value<double>()->default_value(0.0), "Threshold for diff between oracle BLEU and cost-aug BLEU for updating the weights")
+    ("check_positive,P", "Check that the loss is positive before updating")
+    ("k_best_size,k", po::value<int>()->default_value(250), "Size of hypothesis list to search for oracles")
+    ("best_ever,b", "Keep track of the best hypothesis we've ever seen (metric score), and use that as the reference")
+    ("random_seed,S", po::value<uint32_t>(), "Random seed (if not specified, /dev/random will be used)")
+    ("decoder_config,c",po::value<string>(),"Decoder configuration file");
+  po::options_description clo("Command line options");
+  clo.add_options()
+    ("config", po::value<string>(), "Configuration file")
+    ("help,h", "Print this help message and exit");
+  po::options_description dconfig_options, dcmdline_options;
+  dconfig_options.add(opts);
+  dcmdline_options.add(opts).add(clo);
+
+  po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
+  if (conf->count("config")) {
+    ifstream config((*conf)["config"].as<string>().c_str());
+    po::store(po::parse_config_file(config, dconfig_options), *conf);
+  }
+  po::notify(*conf);
+
+  if (conf->count("help") || !conf->count("weights") || !conf->count("input") || !conf->count("decoder_config") || !conf->count("reference")) {
+    cerr << dcmdline_options << endl;
+    return false;
+  }
+  return true;
+}
+
+double scaling_trick = 1; // see http://blog.smola.org/post/940672544/fast-quadratic-regularization-for-online-learning
+/*computes and returns cost augmented score for negative example selection*/
+double cost_augmented_score(const LogVal<double> model_score, const double mt_metric_score, const double mt_metric_scale, const bool logbleu) {
+  if(logbleu) {
+    if(mt_metric_score != 0)
+      // NOTE: log(model_score) is just the model score feature weights * features
+      return log(model_score) * scaling_trick + (- mt_metric_scale * log(mt_metric_score));
+    else
+      return -1000000;
+  }
+  // NOTE: log(model_score) is just the model score feature weights * features
+  return log(model_score) * scaling_trick + (- mt_metric_scale * mt_metric_score);
+}
+
+/*computes and returns mu score, for oracle selection*/
+double muscore(const vector<weight_t>& feature_weights, const SparseVector<double>& feature_values, const double mt_metric_score, const double mu, const bool logbleu) {
+  if(logbleu) {
+    if(mt_metric_score != 0)
+      return feature_values.dot(feature_weights) * mu + (1 - mu) * log(mt_metric_score);
+    else
+      return feature_values.dot(feature_weights) * mu + (1 - mu) * (-1000000); // log(0) is -inf
+  }
+  return feature_values.dot(feature_weights) * mu + (1 - mu) * mt_metric_score;
+}
+
+static const double kMINUS_EPSILON = -1e-6;
+
+struct HypothesisInfo {
+  SparseVector<double> features;
+  double mt_metric_score;
+  // The model score changes when the feature weights change, so it is not stored here
+  // It must be recomputed every time
+};
+
+struct GoodOracle {
+  shared_ptr<HypothesisInfo> good;
+};
+
+struct TrainingObserver : public DecoderObserver {
+  TrainingObserver(const int k,
+                   const DocScorer& d,
+                   vector<GoodOracle>* o,
+                   const vector<weight_t>& feat_weights,
+                   const double metric_scale,
+                   const double Mu,
+                   const bool bestever,
+                   const bool LogBleu) : ds(d), feature_weights(feat_weights), oracles(*o), kbest_size(k), mt_metric_scale(metric_scale), mu(Mu), best_ever(bestever), log_bleu(LogBleu) {}
+  const DocScorer& ds;
+  const vector<weight_t>& feature_weights;
+  vector<GoodOracle>& oracles;
+  shared_ptr<HypothesisInfo> cur_best;
+  shared_ptr<HypothesisInfo> cur_costaug_best;
+  shared_ptr<HypothesisInfo> cur_ref;
+  const int kbest_size;
+  const double mt_metric_scale;
+  const double mu;
+  const bool best_ever;
+  const bool log_bleu;
+
+  const HypothesisInfo& GetCurrentBestHypothesis() const {
+    return *cur_best;
+  }
+
+  const HypothesisInfo& GetCurrentCostAugmentedHypothesis() const {
+    return *cur_costaug_best;
+  }
+
+  const HypothesisInfo& GetCurrentReference() const {
+    return *cur_ref;
+  }
+
+  virtual void NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) {
+    UpdateOracles(smeta.GetSentenceID(), *hg);
+  }
+
+  shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double metric) {
+    shared_ptr<HypothesisInfo> h(new HypothesisInfo);
+    h->features = feats;
+    h->mt_metric_score = metric;
+    return h;
+  }
+
+  void UpdateOracles(int sent_id, const Hypergraph& forest) {
+    //shared_ptr<HypothesisInfo>& cur_ref = oracles[sent_id].good;
+    cur_ref = oracles[sent_id].good;
+    if(!best_ever)
+      cur_ref.reset();
+
+    KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> kbest(forest, kbest_size);
+    double costaug_best_score = 0;
+
+    for (int i = 0; i < kbest_size; ++i) {
+      const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal>::Derivation* d =
+        kbest.LazyKthBest(forest.nodes_.size() - 1, i);
+      if (!d) break;
+      double mt_metric_score = ds[sent_id]->ScoreCandidate(d->yield)->ComputeScore(); //this might need to change!!
+      const SparseVector<double>& feature_vals = d->feature_values;
+      double costaugmented_score = cost_augmented_score(d->score, mt_metric_score, mt_metric_scale, log_bleu); //note that d->score, i.e., model score, is passed in
+      if (i == 0) { //i.e., setting up cur_best to be model score highest, and initializing costaug_best
+        cur_best = MakeHypothesisInfo(feature_vals, mt_metric_score);
+        cur_costaug_best = cur_best;
+        costaug_best_score = costaugmented_score;
+      }
+      if (costaugmented_score > costaug_best_score) { // kbest_mira's cur_bad, i.e., "fear" derivation
+        cur_costaug_best = MakeHypothesisInfo(feature_vals, mt_metric_score);
+        costaug_best_score = costaugmented_score;
+      }
+      double cur_muscore = mt_metric_score;
+      if (!cur_ref) // kbest_mira's cur_good, i.e., "hope" derivation
+        cur_ref = MakeHypothesisInfo(feature_vals, cur_muscore);
+      else {
+        double cur_ref_muscore = cur_ref->mt_metric_score;
+        if(mu > 0) { //select oracle with mixture of model score and BLEU
+          cur_ref_muscore = muscore(feature_weights, cur_ref->features, cur_ref->mt_metric_score, mu, log_bleu);
+          cur_muscore = muscore(feature_weights, d->feature_values, mt_metric_score, mu, log_bleu);
+        }
+        if (cur_muscore > cur_ref_muscore) //replace oracle
+          cur_ref = MakeHypothesisInfo(feature_vals, mt_metric_score);
+      }
+    }
+  }
+};
+
+void ReadTrainingCorpus(const string& fname, vector<string>* c) {
+  ReadFile rf(fname);
+  istream& in = *rf.stream();
+  string line;
+  while(in) {
+    getline(in, line);
+    if (!in) break;
+    c->push_back(line);
+  }
+}
+
+bool ApproxEqual(double a, double b) {
+  if (a == b) return true;
+  return (fabs(a-b)/fabs(b)) < 0.000001;
+}
+
+int main(int argc, char** argv) {
+  register_feature_functions();
+  SetSilent(true);  // turn off verbose decoder output
+
+  po::variables_map conf;
+  if (!InitCommandLine(argc, argv, &conf)) return 1;
+
+  if (conf.count("random_seed"))
+    rng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
+  else
+    rng.reset(new MT19937);
+
+  const bool best_ever = conf.count("best_ever") > 0;
+  vector<string> corpus;
+  ReadTrainingCorpus(conf["input"].as<string>(), &corpus);
+
+  const string metric_name = conf["mt_metric"].as<string>(); //set up scoring; this may need to be changed!!
+
+  ScoreType type = ScoreTypeFromString(metric_name);
+  if (type == TER) {
+    invert_score = true;
+  } else {
+    invert_score = false;
+  }
+  DocScorer ds(type, conf["reference"].as<vector<string> >(), "");
+  cerr << "Loaded " << ds.size() << " references for scoring with " << metric_name << endl;
+  if (ds.size() != corpus.size()) {
+    cerr << "Mismatched number of references (" << ds.size() << ") and sources (" << corpus.size() << ")\n";
+    return 1;
+  }
+
+  ReadFile ini_rf(conf["decoder_config"].as<string>());
+  Decoder decoder(ini_rf.stream());
+
+  // load initial weights
+  vector<weight_t>& decoder_weights = decoder.CurrentWeightVector(); //equivalent to "dense_weights" vector in kbest_mira.cc
+  SparseVector<double> sparse_weights; //equivalent to kbest_mira.cc "lambdas"
+  Weights::InitFromFile(conf["weights"].as<string>(), &decoder_weights);
+  Weights::InitSparseVector(decoder_weights, &sparse_weights);
+
+  //initializing other algorithm and output parameters
+  const double c = conf["regularizer_strength"].as<double>();
+  const int weights_write_interval = conf["weights_write_interval"].as<int>();
+  const double mt_metric_scale = conf["mt_metric_scale"].as<double>();
+  const double mu = conf["mu"].as<double>();
+  const double metric_threshold = conf["metric_threshold"].as<double>();
+  const double stepsize_param = conf["stepsize_param"].as<double>(); //step size in structured SGD optimization step
+  const bool stepsize_reduce = conf.count("stepsize_reduce") > 0;
+  const bool costaug_log_bleu = conf.count("costaug_log_bleu") > 0;
+  const bool average = conf.count("average") > 0;
+  const bool checkpositive = conf.count("check_positive") > 0;
+
+  assert(corpus.size() > 0);
+  vector<GoodOracle> oracles(corpus.size());
+  TrainingObserver observer(conf["k_best_size"].as<int>(), // kbest size
+                            ds, // doc scorer
+                            &oracles,
+                            decoder_weights,
+                            mt_metric_scale,
+                            mu,
+                            best_ever,
+                            costaug_log_bleu);
+  int cur_sent = 0;
+  int line_count = 0;
+  int normalizer = 0;
+  double total_loss = 0;
+  double prev_loss = 0;
+  int dots = 0; // progress bar
+  int cur_pass = 0;
+  SparseVector<double> tot;
+  tot += sparse_weights; //add initial weights to total
+  normalizer++; //add 1 to normalizer
+  int max_iteration = conf["passes"].as<int>();
+  string msg = "# LatentSVM tuned weights";
+  vector<int> order;
+  int interval_counter = 0;
+  RandomPermutation(corpus.size(), &order); //shuffle corpus
+  while (line_count <= max_iteration * corpus.size()) { //loop over all (passes * num sentences) examples
+    //if ((interval_counter * 40 / weights_write_interval) > dots) { ++dots; cerr << '.'; } //check this
+    if ((cur_sent * 40 / corpus.size()) > dots) { ++dots; cerr << '.';}
+    if (interval_counter == weights_write_interval) { //i.e., we need to write out weights
+      sparse_weights *= scaling_trick;
+      tot *= scaling_trick;
+      scaling_trick = 1;
+      cerr << " [SENTENCE NUMBER= " << cur_sent << "\n";
+      cerr << " [AVG METRIC LAST INTERVAL =" << ((total_loss - prev_loss) / weights_write_interval) << "]\n";
+      cerr << " [AVG METRIC THIS PASS THUS FAR =" << (total_loss / cur_sent) << "]\n";
+      cerr << " [TOTAL LOSS: =" << total_loss << "\n";
+      Weights::ShowLargestFeatures(decoder_weights);
+      //dots = 0;
+      interval_counter = 0;
+      prev_loss = total_loss;
+      if (average){
+        SparseVector<double> x = tot;
+        x /= normalizer;
+        ostringstream sa;
+        sa << "weights.latentsvm-" << line_count/weights_write_interval << "-avg.gz";
+        x.init_vector(&decoder_weights);
+        Weights::WriteToFile(sa.str(), decoder_weights, true, &msg);
+      }
+      else {
+        ostringstream os;
+        os << "weights.latentsvm-" << line_count/weights_write_interval << ".gz";
+        sparse_weights.init_vector(&decoder_weights);
+        Weights::WriteToFile(os.str(), decoder_weights, true, &msg);
+      }
+    }
+    if (corpus.size() == cur_sent) { //i.e., finished a pass
+      //cerr << " [AVG METRIC LAST PASS=" << (document_metric_score / corpus.size()) << "]\n";
+      cerr << " [AVG METRIC LAST PASS=" << (total_loss / corpus.size()) << "]\n";
+      cerr << " TOTAL LOSS: " << total_loss << "\n";
+      Weights::ShowLargestFeatures(decoder_weights);
+      cur_sent = 0;
+      total_loss = 0;
+      dots = 0;
+      if(average) {
+        SparseVector<double> x = tot;
+        x /= normalizer;
+        ostringstream sa;
+        sa << "weights.latentsvm-pass" << (cur_pass < 10 ? "0" : "") << cur_pass << "-avg.gz";
+        x.init_vector(&decoder_weights);
+        Weights::WriteToFile(sa.str(), decoder_weights, true, &msg);
+      }
+      else {
+        ostringstream os;
+        os << "weights.latentsvm-pass" << (cur_pass < 10 ? "0" : "") << cur_pass << ".gz";
+        Weights::WriteToFile(os.str(), decoder_weights, true, &msg);
+      }
+      cur_pass++;
+      RandomPermutation(corpus.size(), &order);
+    }
+    if (cur_sent == 0) { //i.e., starting a new pass
+      cerr << "PASS " << (line_count / corpus.size() + 1) << endl;
+    }
+    sparse_weights.init_vector(&decoder_weights); // copy sparse_weights to the decoder weights
+    decoder.SetId(order[cur_sent]); //assign current sentence
+    decoder.Decode(corpus[order[cur_sent]], &observer);  // decode/update oracles
+
+    const HypothesisInfo& cur_best = observer.GetCurrentBestHypothesis(); //model score best
+    const HypothesisInfo& cur_costaug = observer.GetCurrentCostAugmentedHypothesis(); //(model + cost) best; cost = -metric_scale*log(BLEU) or -metric_scale*BLEU
+    //const HypothesisInfo& cur_ref = *oracles[order[cur_sent]].good; //this oracle-best line only picks based on BLEU
+    const HypothesisInfo& cur_ref = observer.GetCurrentReference(); //if mu > 0, this mu-mixed oracle will be picked; otherwise, only on BLEU
+    total_loss += cur_best.mt_metric_score;
+
+    double step_size = stepsize_param;
+    if (stepsize_reduce){ // w_{t+1} = w_t - stepsize_t * grad(Loss)
+      step_size /= (sqrt(cur_sent+1.0));
+    }
+    //actual update step - compute gradient, and modify sparse_weights
+    if(cur_ref.mt_metric_score - cur_costaug.mt_metric_score > metric_threshold) {
+      const double loss = (cur_costaug.features.dot(decoder_weights) - cur_ref.features.dot(decoder_weights)) * scaling_trick + mt_metric_scale * (cur_ref.mt_metric_score - cur_costaug.mt_metric_score);
+      if (!checkpositive || loss > 0.0) { //can update either all the time if check positive is off, or only when loss > 0 if it's on
+        sparse_weights -= cur_costaug.features * step_size / ((1.0-2.0*step_size*c)*scaling_trick); // cost augmented hyp orig -
+        sparse_weights += cur_ref.features * step_size / ((1.0-2.0*step_size*c)*scaling_trick); // ref orig +
+      }
+    }
+    scaling_trick *= (1.0 - 2.0 * step_size * c);
+
+    tot += sparse_weights; //for averaging purposes
+    normalizer++; //for averaging purposes
+    line_count++;
+    interval_counter++;
+    cur_sent++;
+  }
+  cerr << endl;
+  if(average) {
+    tot /= normalizer;
+    tot.init_vector(decoder_weights);
+    msg = "# Latent SSVM tuned weights (averaged vector)";
+    Weights::WriteToFile("weights.latentsvm-final-avg.gz", decoder_weights, true, &msg);
+    cerr << "Optimization complete.\n" << "AVERAGED WEIGHTS: weights.latentsvm-final-avg.gz\n";
+  } else {
+    Weights::WriteToFile("weights.latentsvm-final.gz", decoder_weights, true, &msg);
+    cerr << "Optimization complete.\n";
+  }
+  return 0;
+}
-- 
cgit v1.2.3
From be7f57fdd484e063775d7abf083b9fa4c403b610 Mon Sep 17 00:00:00 2001
From: Avneesh
Date: Fri, 28 Sep 2012 11:22:55 -0700
Subject: latent SSVM code, new Makefile.am and configure.ac

---
 Makefile.am  | 3 ++-
 configure.ac | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/Makefile.am b/Makefile.am
index 24aafd63..b16fc90f 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -11,6 +11,7 @@ SUBDIRS = \
 	training \
 	training/liblbfgs \
 	mira \
+	latent_svm \
 	dtrain \
 	dpmert \
 	pro-train \
@@ -18,7 +19,7 @@ SUBDIRS = \
 	minrisk \
 	gi/pf \
 	gi/markov_al \
-	rst_parser
+	rst_parser
 
 #gi/pyp-topics/src gi/clda/src gi/posterior-regularisation/prjava
diff --git a/configure.ac b/configure.ac
index ea9e84fb..dc68ab77 100644
--- a/configure.ac
+++ b/configure.ac
@@ -124,6 +124,7 @@ AC_CONFIG_FILES([minrisk/Makefile])
 AC_CONFIG_FILES([klm/util/Makefile])
 AC_CONFIG_FILES([klm/lm/Makefile])
 AC_CONFIG_FILES([mira/Makefile])
+AC_CONFIG_FILES([latent_svm/Makefile])
 AC_CONFIG_FILES([dtrain/Makefile])
 AC_CONFIG_FILES([gi/pyp-topics/src/Makefile])
 AC_CONFIG_FILES([gi/clda/src/Makefile])
-- 
cgit v1.2.3
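
With the program wired into the build, the trainer is driven entirely by the options defined in InitCommandLine above. A hypothetical invocation on a tuning set (the file names cdec.ini, weights.init, dev.src, and dev.ref are placeholders, not files from this patch) might look like:

  ./latent_svm -c cdec.ini -w weights.init -i dev.src -r dev.ref \
      -p 20 -C 0.01 -s 1.0 -k 250 -A

Per the code, intermediate weights land in weights.latentsvm-<n>.gz every weights_write_interval lines (with -avg variants when -A is given), per-pass weights in weights.latentsvm-pass<NN>.gz, and the final vector in weights.latentsvm-final.gz or weights.latentsvm-final-avg.gz.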
options"); - opts.add_options() - ("weights,w",po::value(),"[REQD] Input feature weights file") - ("input,i",po::value(),"[REQD] Input source file for development set") - ("passes,p", po::value()->default_value(15), "Number of passes through the training data") - ("weights_write_interval,n", po::value()->default_value(1000), "Number of lines between writing out weights") - ("reference,r",po::value >(), "[REQD] Reference translation(s) (tokenized text file)") - ("mt_metric,m",po::value()->default_value("ibm_bleu"), "Scoring metric (ibm_bleu, nist_bleu, koehn_bleu, ter, combi)") - ("regularizer_strength,C", po::value()->default_value(0.01), "regularization strength") - ("mt_metric_scale,s", po::value()->default_value(1.0), "Cost function is -mt_metric_scale*BLEU") - ("costaug_log_bleu,l", "Flag converts BLEU to log space. Cost function is thus -mt_metric_scale*log(BLEU). Not on by default") - ("average,A", "Average the weights (this is a weighted average due to the scaling factor)") - ("mu,u", po::value()->default_value(0.0), "weight (between 0 and 1) to scale model score by for oracle selection") - ("stepsize_param,a", po::value()->default_value(0.01), "Stepsize parameter, during optimization") - ("stepsize_reduce,t", "Divide step size by sqrt(number of examples seen so far), as per Ratliff et al., 2007") - ("metric_threshold,T", po::value()->default_value(0.0), "Threshold for diff between oracle BLEU and cost-aug BLEU for updating the weights") - ("check_positive,P", "Check that the loss is positive before updating") - ("k_best_size,k", po::value()->default_value(250), "Size of hypothesis list to search for oracles") - ("best_ever,b", "Keep track of the best hypothesis we've ever seen (metric score), and use that as the reference") - ("random_seed,S", po::value(), "Random seed (if not specified, /dev/random will be used)") - ("decoder_config,c",po::value(),"Decoder configuration file"); - po::options_description clo("Command line options"); - clo.add_options() - ("config", po::value(), "Configuration file") - ("help,h", "Print this help message and exit"); - po::options_description dconfig_options, dcmdline_options; - dconfig_options.add(opts); - dcmdline_options.add(opts).add(clo); - - po::store(parse_command_line(argc, argv, dcmdline_options), *conf); - if (conf->count("config")) { - ifstream config((*conf)["config"].as().c_str()); - po::store(po::parse_config_file(config, dconfig_options), *conf); - } - po::notify(*conf); - - if (conf->count("help") || !conf->count("weights") || !conf->count("input") || !conf->count("decoder_config") || !conf->count("reference")) { - cerr << dcmdline_options << endl; - return false; - } - return true; -} - -double scaling_trick = 1; // see http://blog.smola.org/post/940672544/fast-quadratic-regularization-for-online-learning -/*computes and returns cost augmented score for negative example selection*/ -double cost_augmented_score(const LogVal model_score, const double mt_metric_score, const double mt_metric_scale, const bool logbleu) { - if(logbleu) { - if(mt_metric_score != 0) - // NOTE: log(model_score) is just the model score feature weights * features - return log(model_score) * scaling_trick + (- mt_metric_scale * log(mt_metric_score)); - else - return -1000000; - } - // NOTE: log(model_score) is just the model score feature weights * features - return log(model_score) * scaling_trick + (- mt_metric_scale * mt_metric_score); -} - -/*computes and returns mu score, for oracle selection*/ -double muscore(const vector& feature_weights, const 
SparseVector& feature_values, const double mt_metric_score, const double mu, const bool logbleu) { - if(logbleu) { - if(mt_metric_score != 0) - return feature_values.dot(feature_weights) * mu + (1 - mu) * log(mt_metric_score); - else - return feature_values.dot(feature_weights) * mu + (1 - mu) * (-1000000); // log(0) is -inf - } - return feature_values.dot(feature_weights) * mu + (1 - mu) * mt_metric_score; -} - -static const double kMINUS_EPSILON = -1e-6; - -struct HypothesisInfo { - SparseVector features; - double mt_metric_score; - // The model score changes when the feature weights change, so it is not stored here - // It must be recomputed every time -}; - -struct GoodOracle { - shared_ptr good; -}; - -struct TrainingObserver : public DecoderObserver { - TrainingObserver(const int k, - const DocScorer& d, - vector* o, - const vector& feat_weights, - const double metric_scale, - const double Mu, - const bool bestever, - const bool LogBleu) : ds(d), feature_weights(feat_weights), oracles(*o), kbest_size(k), mt_metric_scale(metric_scale), mu(Mu), best_ever(bestever), log_bleu(LogBleu) {} - const DocScorer& ds; - const vector& feature_weights; - vector& oracles; - shared_ptr cur_best; - shared_ptr cur_costaug_best; - shared_ptr cur_ref; - const int kbest_size; - const double mt_metric_scale; - const double mu; - const bool best_ever; - const bool log_bleu; - - const HypothesisInfo& GetCurrentBestHypothesis() const { - return *cur_best; - } - - const HypothesisInfo& GetCurrentCostAugmentedHypothesis() const { - return *cur_costaug_best; - } - - const HypothesisInfo& GetCurrentReference() const { - return *cur_ref; - } - - virtual void NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) { - UpdateOracles(smeta.GetSentenceID(), *hg); - } - - shared_ptr MakeHypothesisInfo(const SparseVector& feats, const double metric) { - shared_ptr h(new HypothesisInfo); - h->features = feats; - h->mt_metric_score = metric; - return h; - } - - void UpdateOracles(int sent_id, const Hypergraph& forest) { - //shared_ptr& cur_ref = oracles[sent_id].good; - cur_ref = oracles[sent_id].good; - if(!best_ever) - cur_ref.reset(); - - KBest::KBestDerivations, ESentenceTraversal> kbest(forest, kbest_size); - double costaug_best_score = 0; - - for (int i = 0; i < kbest_size; ++i) { - const KBest::KBestDerivations, ESentenceTraversal>::Derivation* d = - kbest.LazyKthBest(forest.nodes_.size() - 1, i); - if (!d) break; - double mt_metric_score = ds[sent_id]->ScoreCandidate(d->yield)->ComputeScore(); //this might need to change!! 
- const SparseVector& feature_vals = d->feature_values; - double costaugmented_score = cost_augmented_score(d->score, mt_metric_score, mt_metric_scale, log_bleu); //note that d->score, i.e., model score, is passed in - if (i == 0) { //i.e., setting up cur_best to be model score highest, and initializing costaug_best - cur_best = MakeHypothesisInfo(feature_vals, mt_metric_score); - cur_costaug_best = cur_best; - costaug_best_score = costaugmented_score; - } - if (costaugmented_score > costaug_best_score) { // kbest_mira's cur_bad, i.e., "fear" derivation - cur_costaug_best = MakeHypothesisInfo(feature_vals, mt_metric_score); - costaug_best_score = costaugmented_score; - } - double cur_muscore = mt_metric_score; - if (!cur_ref) // kbest_mira's cur_good, i.e., "hope" derivation - cur_ref = MakeHypothesisInfo(feature_vals, cur_muscore); - else { - double cur_ref_muscore = cur_ref->mt_metric_score; - if(mu > 0) { //select oracle with mixture of model score and BLEU - cur_ref_muscore = muscore(feature_weights, cur_ref->features, cur_ref->mt_metric_score, mu, log_bleu); - cur_muscore = muscore(feature_weights, d->feature_values, mt_metric_score, mu, log_bleu); - } - if (cur_muscore > cur_ref_muscore) //replace oracle - cur_ref = MakeHypothesisInfo(feature_vals, mt_metric_score); - } - } - } -}; - -void ReadTrainingCorpus(const string& fname, vector* c) { - ReadFile rf(fname); - istream& in = *rf.stream(); - string line; - while(in) { - getline(in, line); - if (!in) break; - c->push_back(line); - } -} - -bool ApproxEqual(double a, double b) { - if (a == b) return true; - return (fabs(a-b)/fabs(b)) < 0.000001; -} - -int main(int argc, char** argv) { - register_feature_functions(); - SetSilent(true); // turn off verbose decoder output - - po::variables_map conf; - if (!InitCommandLine(argc, argv, &conf)) return 1; - - if (conf.count("random_seed")) - rng.reset(new MT19937(conf["random_seed"].as())); - else - rng.reset(new MT19937); - - const bool best_ever = conf.count("best_ever") > 0; - vector corpus; - ReadTrainingCorpus(conf["input"].as(), &corpus); - - const string metric_name = conf["mt_metric"].as(); //set up scoring; this may need to be changed!! 
- - ScoreType type = ScoreTypeFromString(metric_name); - if (type == TER) { - invert_score = true; - } else { - invert_score = false; - } - DocScorer ds(type, conf["reference"].as >(), ""); - cerr << "Loaded " << ds.size() << " references for scoring with " << metric_name << endl; - if (ds.size() != corpus.size()) { - cerr << "Mismatched number of references (" << ds.size() << ") and sources (" << corpus.size() << ")\n"; - return 1; - } - - ReadFile ini_rf(conf["decoder_config"].as()); - Decoder decoder(ini_rf.stream()); - - // load initial weights - vector& decoder_weights = decoder.CurrentWeightVector(); //equivalent to "dense_weights" vector in kbest_mira.cc - SparseVector sparse_weights; //equivaelnt to kbest_mira.cc "lambdas" - Weights::InitFromFile(conf["weights"].as(), &decoder_weights); - Weights::InitSparseVector(decoder_weights, &sparse_weights); - - //initializing other algorithm and output parameters - const double c = conf["regularizer_strength"].as(); - const int weights_write_interval = conf["weights_write_interval"].as(); - const double mt_metric_scale = conf["mt_metric_scale"].as(); - const double mu = conf["mu"].as(); - const double metric_threshold = conf["metric_threshold"].as(); - const double stepsize_param = conf["stepsize_param"].as(); //step size in structured SGD optimization step - const bool stepsize_reduce = conf.count("stepsize_reduce") > 0; - const bool costaug_log_bleu = conf.count("costaug_log_bleu") > 0; - const bool average = conf.count("average") > 0; - const bool checkpositive = conf.count("check_positive") > 0; - - assert(corpus.size() > 0); - vector oracles(corpus.size()); - TrainingObserver observer(conf["k_best_size"].as(), // kbest size - ds, // doc scorer - &oracles, - decoder_weights, - mt_metric_scale, - mu, - best_ever, - costaug_log_bleu); - int cur_sent = 0; - int line_count = 0; - int normalizer = 0; - double total_loss = 0; - double prev_loss = 0; - int dots = 0; // progess bar - int cur_pass = 0; - SparseVector tot; - tot += sparse_weights; //add initial weights to total - normalizer++; //add 1 to normalizer - int max_iteration = conf["passes"].as(); - string msg = "# LatentSVM tuned weights"; - vector order; - int interval_counter = 0; - RandomPermutation(corpus.size(), &order); //shuffle corpus - while (line_count <= max_iteration * corpus.size()) { //loop over all (passes * num sentences) examples - //if ((interval_counter * 40 / weights_write_interval) > dots) { ++dots; cerr << '.'; } //check this - if ((cur_sent * 40 / corpus.size()) > dots) { ++dots; cerr << '.';} - if (interval_counter == weights_write_interval) { //i.e., we need to write out weights - sparse_weights *= scaling_trick; - tot *= scaling_trick; - scaling_trick = 1; - cerr << " [SENTENCE NUMBER= " << cur_sent << "\n"; - cerr << " [AVG METRIC LAST INTERVAL =" << ((total_loss - prev_loss) / weights_write_interval) << "]\n"; - cerr << " [AVG METRIC THIS PASS THUS FAR =" << (total_loss / cur_sent) << "]\n"; - cerr << " [TOTAL LOSS: =" << total_loss << "\n"; - Weights::ShowLargestFeatures(decoder_weights); - //dots = 0; - interval_counter = 0; - prev_loss = total_loss; - if (average){ - SparseVector x = tot; - x /= normalizer; - ostringstream sa; - sa << "weights.latentsvm-" << line_count/weights_write_interval << "-avg.gz"; - x.init_vector(&decoder_weights); - Weights::WriteToFile(sa.str(), decoder_weights, true, &msg); - } - else { - ostringstream os; - os << "weights.latentsvm-" << line_count/weights_write_interval << ".gz"; - sparse_weights.init_vector(&decoder_weights); 
- Weights::WriteToFile(os.str(), decoder_weights, true, &msg); - } - } - if (corpus.size() == cur_sent) { //i.e., finished a pass - //cerr << " [AVG METRIC LAST PASS=" << (document_metric_score / corpus.size()) << "]\n"; - cerr << " [AVG METRIC LAST PASS=" << (total_loss / corpus.size()) << "]\n"; - cerr << " TOTAL LOSS: " << total_loss << "\n"; - Weights::ShowLargestFeatures(decoder_weights); - cur_sent = 0; - total_loss = 0; - dots = 0; - if(average) { - SparseVector x = tot; - x /= normalizer; - ostringstream sa; - sa << "weights.latentsvm-pass" << (cur_pass < 10 ? "0" : "") << cur_pass << "-avg.gz"; - x.init_vector(&decoder_weights); - Weights::WriteToFile(sa.str(), decoder_weights, true, &msg); - } - else { - ostringstream os; - os << "weights.latentsvm-pass" << (cur_pass < 10 ? "0" : "") << cur_pass << ".gz"; - Weights::WriteToFile(os.str(), decoder_weights, true, &msg); - } - cur_pass++; - RandomPermutation(corpus.size(), &order); - } - if (cur_sent == 0) { //i.e., starting a new pass - cerr << "PASS " << (line_count / corpus.size() + 1) << endl; - } - sparse_weights.init_vector(&decoder_weights); // copy sparse_weights to the decoder weights - decoder.SetId(order[cur_sent]); //assign current sentence - decoder.Decode(corpus[order[cur_sent]], &observer); // decode/update oracles - - const HypothesisInfo& cur_best = observer.GetCurrentBestHypothesis(); //model score best - const HypothesisInfo& cur_costaug = observer.GetCurrentCostAugmentedHypothesis(); //(model + cost) best; cost = -metric_scale*log(BLEU) or -metric_scale*BLEU - //const HypothesisInfo& cur_ref = *oracles[order[cur_sent]].good; //this oracle-best line only picks based on BLEU - const HypothesisInfo& cur_ref = observer.GetCurrentReference(); //if mu > 0, this mu-mixed oracle will be picked; otherwise, only on BLEU - total_loss += cur_best.mt_metric_score; - - double step_size = stepsize_param; - if (stepsize_reduce){ // w_{t+1} = w_t - stepsize_t * grad(Loss) - step_size /= (sqrt(cur_sent+1.0)); - } - //actual update step - compute gradient, and modify sparse_weights - if(cur_ref.mt_metric_score - cur_costaug.mt_metric_score > metric_threshold) { - const double loss = (cur_costaug.features.dot(decoder_weights) - cur_ref.features.dot(decoder_weights)) * scaling_trick + mt_metric_scale * (cur_ref.mt_metric_score - cur_costaug.mt_metric_score); - if (!checkpositive || loss > 0.0) { //can update either all the time if check positive is off, or only when loss > 0 if it's on - sparse_weights -= cur_costaug.features * step_size / ((1.0-2.0*step_size*c)*scaling_trick); // cost augmented hyp orig - - sparse_weights += cur_ref.features * step_size / ((1.0-2.0*step_size*c)*scaling_trick); // ref orig + - } - } - scaling_trick *= (1.0 - 2.0 * step_size * c); - - tot += sparse_weights; //for averaging purposes - normalizer++; //for averaging purposes - line_count++; - interval_counter++; - cur_sent++; - } - cerr << endl; - if(average) { - tot /= normalizer; - tot.init_vector(decoder_weights); - msg = "# Latent SSVM tuned weights (averaged vector)"; - Weights::WriteToFile("weights.latentsvm-final-avg.gz", decoder_weights, true, &msg); - cerr << "Optimization complete.\n" << "AVERAGED WEIGHTS: weights.latentsvm-final-avg.gz\n"; - } else { - Weights::WriteToFile("weights.latentsvm-final.gz", decoder_weights, true, &msg); - cerr << "Optimization complete.\n"; - } - return 0; -} - -- cgit v1.2.3 From dc13d4b6c6a35563fb7dc5e426f8dc4142a03702 Mon Sep 17 00:00:00 2001 From: Avneesh Saluja Date: Thu, 28 Mar 2013 18:58:31 -0700 Subject: 
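
The relocation leaves the algorithm untouched. For reference, in our notation (a sketch inferred from the code and its hope/fear comments, with the default linear-BLEU cost rather than the -l log variant), each sentence x contributes one step:

  % fear: cost-augmented argmax over the k-best list (cost_augmented_score)
  \hat{y} = \arg\max_{y \in \mathrm{kbest}(x)} \; \mathbf{w}\cdot\mathbf{f}(y) - s\,\mathrm{BLEU}(y)
  % hope: mu-mixed oracle (muscore); mu = 0 reduces to pure BLEU
  y^{+} = \arg\max_{y \in \mathrm{kbest}(x)} \; \mu\,\mathbf{w}\cdot\mathbf{f}(y) + (1-\mu)\,\mathrm{BLEU}(y)
  % regularized step, taken only if BLEU(y^{+}) - BLEU(\hat{y}) > metric_threshold
  \mathbf{w} \leftarrow (1 - 2\eta C)\,\mathbf{w} + \eta\,\bigl(\mathbf{f}(y^{+}) - \mathbf{f}(\hat{y})\bigr)

where s is mt_metric_scale, mu is the -u mixing weight, eta is the (possibly 1/sqrt(t)-reduced) step size, and C is regularizer_strength; the scaling trick above implements the (1 - 2*eta*C) shrinkage lazily.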
From dc13d4b6c6a35563fb7dc5e426f8dc4142a03702 Mon Sep 17 00:00:00 2001
From: Avneesh Saluja
Date: Thu, 28 Mar 2013 18:58:31 -0700
Subject: latent SVM

---
 training/latent_svm/Makefile.am   |   6 +
 training/latent_svm/latent_svm.cc | 412 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 418 insertions(+)
 create mode 100644 training/latent_svm/Makefile.am
 create mode 100644 training/latent_svm/latent_svm.cc

diff --git a/training/latent_svm/Makefile.am b/training/latent_svm/Makefile.am
new file mode 100644
index 00000000..673b9159
--- /dev/null
+++ b/training/latent_svm/Makefile.am
@@ -0,0 +1,6 @@
+bin_PROGRAMS = latent_svm
+
+latent_svm_SOURCES = latent_svm.cc
+latent_svm_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
+
+AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval
diff --git a/training/latent_svm/latent_svm.cc b/training/latent_svm/latent_svm.cc
new file mode 100644
index 00000000..ab9c1d5d
--- /dev/null
+++ b/training/latent_svm/latent_svm.cc
-- 
cgit v1.2.3
From 981c2e5b2b4415c9cf54532a3703ce4520c747e3 Mon Sep 17 00:00:00 2001
From: Avneesh Saluja
Date: Thu, 28 Mar 2013 19:01:24 -0700
Subject: updated Makefiles

---
 training/Makefile.am            | 1 +
 training/latent_svm/Makefile.am | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/training/Makefile.am b/training/Makefile.am
index e95e045f..8ef3c939 100644
--- a/training/Makefile.am
+++ b/training/Makefile.am
@@ -6,6 +6,7 @@ SUBDIRS = \
 	dpmert \
 	pro \
 	dtrain \
+	latent_svm \
 	mira \
 	rampion

diff --git a/training/latent_svm/Makefile.am b/training/latent_svm/Makefile.am
index 673b9159..65c5e038 100644
--- a/training/latent_svm/Makefile.am
+++ b/training/latent_svm/Makefile.am
@@ -1,6 +1,6 @@
 bin_PROGRAMS = latent_svm
 
 latent_svm_SOURCES = latent_svm.cc
-latent_svm_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
+latent_svm_LDADD = ../../decoder/libcdec.a ../../klm/search/libksearch.a ../../mteval/libmteval.a ../../utils/libutils.a ../../klm/lm/libklm.a ../../klm/util/libklm_util.a ../../klm/util/double-conversion/libklm_util_double.a
 
 AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval
-- 
cgit v1.2.3

From 19f9cb52e536ca36d38428ca7ac0104d315cbc97 Mon Sep 17 00:00:00 2001
From: Avneesh Saluja
Date: Thu, 28 Mar 2013 22:42:04 -0700
Subject: updated configure.ac

---
 configure.ac | 1 +
 1 file changed, 1 insertion(+)

diff --git a/configure.ac b/configure.ac
index 98deac86..8632fb51 100644
--- a/configure.ac
+++ b/configure.ac
@@ -128,6 +128,7 @@ AC_CONFIG_FILES([training/pro/Makefile])
 AC_CONFIG_FILES([training/rampion/Makefile])
 AC_CONFIG_FILES([training/minrisk/Makefile])
 AC_CONFIG_FILES([training/mira/Makefile])
+AC_CONFIG_FILES([training/latent_svm/Makefile])
 AC_CONFIG_FILES([training/dtrain/Makefile])
 
 # external feature function example code
-- 
cgit v1.2.3
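
Since these last two commits touch configure.ac and Makefile.am files, the generated build scripts have to be refreshed before the new target is picked up. A typical autotools sequence (an assumption about the standard workflow, not spelled out in the patches themselves):

  autoreconf -ifv          # regenerate configure and the Makefile.in files
  ./configure
  make -C training/latent_svm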