author     Paul Baltescu <pauldb89@gmail.com>  2013-11-23 17:33:47 +0000
committer  Paul Baltescu <pauldb89@gmail.com>  2013-11-23 17:33:47 +0000
commit     072c4bb1edde483b87b93bc6f4eec36fc8a21008 (patch)
tree       6ceaa6ae1e08df9e523282740b14f4857236297c /training
parent     7e90b8ea10904f9b83f4e77e14c7396a3e6f7d5d (diff)
parent     9e80389b9763aa4f7f626ec71b561ccf6948d3ad (diff)
Merge branch 'master' of https://github.com/redpony/cdec
Diffstat (limited to 'training')
-rw-r--r--  training/crf/mpi_online_optimize.cc                 8
-rw-r--r--  training/dtrain/Makefile.am                         2
-rw-r--r--  training/dtrain/README.md                           30
-rw-r--r--  training/dtrain/dtrain.cc                           201
-rw-r--r--  training/dtrain/dtrain.h                            2
-rw-r--r--  training/dtrain/examples/standard/dtrain.ini        11
-rw-r--r--  training/dtrain/examples/standard/expected-output   125
-rw-r--r--  training/dtrain/examples/standard/nc-wmt11.gz       bin 0 -> 113504 bytes
-rwxr-xr-x  training/dtrain/parallelize.rb                      20
-rw-r--r--  training/latent_svm/latent_svm.cc                   13
-rw-r--r--  training/mira/kbest_cut_mira.cc                     72
-rw-r--r--  training/mira/kbest_mira.cc                         18
-rwxr-xr-x  training/mira/mira.py                               100
-rw-r--r--  training/pro/mr_pro_map.cc                          1
-rw-r--r--  training/utils/candidate_set.cc                     11
-rw-r--r--  training/utils/online_optimizer.h                   8
-rw-r--r--  training/utils/optimize_test.cc                     6
17 files changed, 395 insertions, 233 deletions
diff --git a/training/crf/mpi_online_optimize.cc b/training/crf/mpi_online_optimize.cc
index 9e1ae34c..6b5b7d64 100644
--- a/training/crf/mpi_online_optimize.cc
+++ b/training/crf/mpi_online_optimize.cc
@@ -4,11 +4,11 @@
#include <vector>
#include <cassert>
#include <cmath>
-#include <tr1/memory>
#include <ctime>
#include <boost/program_options.hpp>
#include <boost/program_options/variables_map.hpp>
+#include <boost/shared_ptr.hpp>
#include "stringlib.h"
#include "verbose.h"
@@ -219,7 +219,7 @@ int main(int argc, char** argv) {
#endif
if (size > 1) SetSilent(true); // turn off verbose decoder output
register_feature_functions();
- std::tr1::shared_ptr<MT19937> rng;
+ boost::shared_ptr<MT19937> rng;
po::variables_map conf;
if (!InitCommandLine(argc, argv, &conf))
@@ -264,8 +264,8 @@ int main(int argc, char** argv) {
ReadTrainingCorpus(conf["training_data"].as<string>(), rank, size, &corpus, &ids);
assert(corpus.size() > 0);
- std::tr1::shared_ptr<OnlineOptimizer> o;
- std::tr1::shared_ptr<LearningRateSchedule> lr;
+ boost::shared_ptr<OnlineOptimizer> o;
+ boost::shared_ptr<LearningRateSchedule> lr;
const unsigned size_per_proc = conf["minibatch_size_per_proc"].as<unsigned>();
if (size_per_proc > corpus.size()) {
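This hunk is part of a tree-wide move from std::tr1::shared_ptr to boost::shared_ptr, so the code no longer depends on the <tr1/memory> header that newer compilers drop. A minimal standalone sketch of the resulting usage (Mt19937Stub is a hypothetical stand-in for cdec's MT19937, not part of the commit):

```
#include <boost/shared_ptr.hpp>
#include <iostream>

struct Mt19937Stub { unsigned next() { return 4u; } };  // stand-in for cdec's MT19937

int main() {
  boost::shared_ptr<Mt19937Stub> rng;        // starts empty, like 'rng' above
  if (!rng) rng.reset(new Mt19937Stub());    // constructed lazily on first use
  std::cout << rng->next() << std::endl;
  return 0;
}
```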
diff --git a/training/dtrain/Makefile.am b/training/dtrain/Makefile.am
index 844c790d..ecb6c128 100644
--- a/training/dtrain/Makefile.am
+++ b/training/dtrain/Makefile.am
@@ -1,7 +1,7 @@
bin_PROGRAMS = dtrain
dtrain_SOURCES = dtrain.cc score.cc dtrain.h kbestget.h ksampler.h pairsampling.h score.h
-dtrain_LDADD = ../../decoder/libcdec.a ../../klm/search/libksearch.a ../../mteval/libmteval.a ../../utils/libutils.a ../../klm/lm/libklm.a ../../klm/util/libklm_util.a ../../klm/util/double-conversion/libklm_util_double.a
+dtrain_LDADD = ../../decoder/libcdec.a ../../klm/search/libksearch.a ../../mteval/libmteval.a ../../utils/libutils.a ../../klm/lm/libklm.a ../../klm/util/libklm_util.a ../../klm/util/double-conversion/libklm_util_double.a -lboost_regex
AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval
diff --git a/training/dtrain/README.md b/training/dtrain/README.md
index 2bae6b48..aa1ab3e7 100644
--- a/training/dtrain/README.md
+++ b/training/dtrain/README.md
@@ -1,10 +1,15 @@
This is a simple (and parallelizable) tuning method for cdec
-which is able to train the weights of very many (sparse) features.
-It was used here:
- "Joint Feature Selection in Distributed Stochastic
- Learning for Large-Scale Discriminative Training in
- SMT"
-(Simianer, Riezler, Dyer; ACL 2012)
+which is able to train the weights of very many (sparse) features
+on the training set.
+
+It was used in these papers:
+> "Joint Feature Selection in Distributed Stochastic
+> Learning for Large-Scale Discriminative Training in
+> SMT" (Simianer, Riezler, Dyer; ACL 2012)
+>
+> "Multi-Task Learning for Improved Discriminative
+> Training in SMT" (Simianer, Riezler; WMT 2013)
+>
Building
@@ -17,20 +22,9 @@ To build only parts needed for dtrain do
cd training/dtrain/; make
```
-Ideas
------
- * get approx_bleu to work?
- * implement minibatches (Minibatch and Parallelization for Online Large Margin Structured Learning)
- * learning rate 1/T?
- * use an oracle? mira-like (model vs. BLEU), feature repr. of reference!?
- * implement lc_bleu properly
- * merge kbest lists of previous epochs (as MERT does)
- * ``walk entire regularization path''
- * rerank after each update?
-
Running
-------
-See directories under test/ .
+See directories under examples/ .
Legal
-----
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 0ee2f124..0a27a068 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -12,8 +12,9 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
{
po::options_description ini("Configuration File Options");
ini.add_options()
- ("input", po::value<string>()->default_value("-"), "input file (src)")
+ ("input", po::value<string>(), "input file (src)")
("refs,r", po::value<string>(), "references")
+ ("bitext,b", po::value<string>(), "bitext: 'src ||| tgt'")
("output", po::value<string>()->default_value("-"), "output weights file, '-' for STDOUT")
("input_weights", po::value<string>(), "input weights file (e.g. from previous iteration)")
("decoder_config", po::value<string>(), "configuration file for cdec")
@@ -40,6 +41,10 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
("scale_bleu_diff", po::value<bool>()->zero_tokens(), "learning rate <- bleu diff of a misranked pair")
("loss_margin", po::value<weight_t>()->default_value(0.), "update if no error in pref pair but model scores this near")
("max_pairs", po::value<unsigned>()->default_value(std::numeric_limits<unsigned>::max()), "max. # of pairs per Sent.")
+ ("pclr", po::value<string>()->default_value("no"), "use a (simple|adagrad) per-coordinate learning rate")
+ ("batch", po::value<bool>()->zero_tokens(), "do batch optimization")
+ ("repeat", po::value<unsigned>()->default_value(1), "repeat optimization over kbest list this number of times")
+ //("test-k-best", po::value<bool>()->zero_tokens(), "check if optimization works (use repeat >= 2)")
("noup", po::value<bool>()->zero_tokens(), "do not update weights");
po::options_description cl("Command Line Options");
cl.add_options()
@@ -72,13 +77,17 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
cerr << "Wrong 'pair_sampling' param: '" << (*cfg)["pair_sampling"].as<string>() << "'." << endl;
return false;
}
- if(cfg->count("hi_lo") && (*cfg)["pair_sampling"].as<string>() != "XYX") {
+ if (cfg->count("hi_lo") && (*cfg)["pair_sampling"].as<string>() != "XYX") {
cerr << "Warning: hi_lo only works with pair_sampling XYX." << endl;
}
- if((*cfg)["hi_lo"].as<float>() > 0.5 || (*cfg)["hi_lo"].as<float>() < 0.01) {
+ if ((*cfg)["hi_lo"].as<float>() > 0.5 || (*cfg)["hi_lo"].as<float>() < 0.01) {
cerr << "hi_lo must lie in [0.01, 0.5]" << endl;
return false;
}
+ if ((cfg->count("input")>0 || cfg->count("refs")>0) && cfg->count("bitext")>0) {
+ cerr << "Provide 'input' and 'refs' or 'bitext', not both." << endl;
+ return false;
+ }
if ((*cfg)["pair_threshold"].as<score_t>() < 0) {
cerr << "The threshold must be >= 0!" << endl;
return false;
@@ -120,10 +129,16 @@ main(int argc, char** argv)
const float hi_lo = cfg["hi_lo"].as<float>();
const score_t approx_bleu_d = cfg["approx_bleu_d"].as<score_t>();
const unsigned max_pairs = cfg["max_pairs"].as<unsigned>();
+ int repeat = cfg["repeat"].as<unsigned>();
+ //bool test_k_best = false;
+ //if (cfg.count("test-k-best")) test_k_best = true;
weight_t loss_margin = cfg["loss_margin"].as<weight_t>();
+ bool batch = false;
+ if (cfg.count("batch")) batch = true;
if (loss_margin > 9998.) loss_margin = std::numeric_limits<float>::max();
bool scale_bleu_diff = false;
if (cfg.count("scale_bleu_diff")) scale_bleu_diff = true;
+ const string pclr = cfg["pclr"].as<string>();
bool average = false;
if (select_weights == "avg")
average = true;
@@ -131,7 +146,6 @@ main(int argc, char** argv)
if (cfg.count("print_weights"))
boost::split(print_weights, cfg["print_weights"].as<string>(), boost::is_any_of(" "));
-
// setup decoder
register_feature_functions();
SetSilent(true);
@@ -178,17 +192,16 @@ main(int argc, char** argv)
observer->SetScorer(scorer);
// init weights
- vector<weight_t>& dense_weights = decoder.CurrentWeightVector();
+ vector<weight_t>& decoder_weights = decoder.CurrentWeightVector();
SparseVector<weight_t> lambdas, cumulative_penalties, w_average;
- if (cfg.count("input_weights")) Weights::InitFromFile(cfg["input_weights"].as<string>(), &dense_weights);
- Weights::InitSparseVector(dense_weights, &lambdas);
+ if (cfg.count("input_weights")) Weights::InitFromFile(cfg["input_weights"].as<string>(), &decoder_weights);
+ Weights::InitSparseVector(decoder_weights, &lambdas);
// meta params for perceptron, SVM
weight_t eta = cfg["learning_rate"].as<weight_t>();
weight_t gamma = cfg["gamma"].as<weight_t>();
// faster perceptron: consider only misranked pairs, see
- // DO NOT ENABLE WITH SVM (gamma > 0) OR loss_margin!
bool faster_perceptron = false;
if (gamma==0 && loss_margin==0) faster_perceptron = true;
@@ -208,13 +221,24 @@ main(int argc, char** argv)
// output
string output_fn = cfg["output"].as<string>();
// input
- string input_fn = cfg["input"].as<string>();
+ bool read_bitext = false;
+ string input_fn;
+ if (cfg.count("bitext")) {
+ read_bitext = true;
+ input_fn = cfg["bitext"].as<string>();
+ } else {
+ input_fn = cfg["input"].as<string>();
+ }
ReadFile input(input_fn);
// buffer input for t > 0
vector<string> src_str_buf; // source strings (decoder takes only strings)
vector<vector<WordID> > ref_ids_buf; // references as WordID vecs
- string refs_fn = cfg["refs"].as<string>();
- ReadFile refs(refs_fn);
+ ReadFile refs;
+ string refs_fn;
+ if (!read_bitext) {
+ refs_fn = cfg["refs"].as<string>();
+ refs.Init(refs_fn);
+ }
unsigned in_sz = std::numeric_limits<unsigned>::max(); // input index, input size
vector<pair<score_t, score_t> > all_scores;
@@ -229,6 +253,7 @@ main(int argc, char** argv)
cerr << setw(25) << "k " << k << endl;
cerr << setw(25) << "N " << N << endl;
cerr << setw(25) << "T " << T << endl;
+ cerr << setw(25) << "batch " << batch << endl;
cerr << setw(26) << "scorer '" << scorer_str << "'" << endl;
if (scorer_str == "approx_bleu")
cerr << setw(25) << "approx. B discount " << approx_bleu_d << endl;
@@ -249,10 +274,14 @@ main(int argc, char** argv)
cerr << setw(25) << "l1 reg " << l1_reg << " '" << cfg["l1_reg"].as<string>() << "'" << endl;
if (rescale)
cerr << setw(25) << "rescale " << rescale << endl;
+ cerr << setw(25) << "pclr " << pclr << endl;
cerr << setw(25) << "max pairs " << max_pairs << endl;
+ cerr << setw(25) << "repeat " << repeat << endl;
+ //cerr << setw(25) << "test k-best " << test_k_best << endl;
cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl;
cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
- cerr << setw(25) << "refs " << "'" << refs_fn << "'" << endl;
+ if (!read_bitext)
+ cerr << setw(25) << "refs " << "'" << refs_fn << "'" << endl;
cerr << setw(25) << "output " << "'" << output_fn << "'" << endl;
if (cfg.count("input_weights"))
cerr << setw(25) << "weights in " << "'" << cfg["input_weights"].as<string>() << "'" << endl;
@@ -261,6 +290,11 @@ main(int argc, char** argv)
if (!verbose) cerr << "(a dot represents " << DTRAIN_DOTS << " inputs)" << endl;
}
+ // pclr
+ SparseVector<weight_t> learning_rates;
+ // batch
+ SparseVector<weight_t> batch_updates;
+ score_t batch_loss;
for (unsigned t = 0; t < T; t++) // T epochs
{
@@ -269,16 +303,24 @@ main(int argc, char** argv)
time(&start);
score_t score_sum = 0.;
score_t model_sum(0);
- unsigned ii = 0, rank_errors = 0, margin_violations = 0, npairs = 0, f_count = 0, list_sz = 0;
+ unsigned ii = 0, rank_errors = 0, margin_violations = 0, npairs = 0, f_count = 0, list_sz = 0, kbest_loss_improve = 0;
+ batch_loss = 0.;
if (!quiet) cerr << "Iteration #" << t+1 << " of " << T << "." << endl;
while(true)
{
string in;
+ string ref;
bool next = false, stop = false; // next iteration or premature stop
if (t == 0) {
if(!getline(*input, in)) next = true;
+ if(read_bitext) {
+ vector<string> strs;
+ boost::algorithm::split_regex(strs, in, boost::regex(" \\|\\|\\| "));
+ in = strs[0];
+ ref = strs[1];
+ }
} else {
if (ii == in_sz) next = true; // stop if we reach the end of our input
}
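The new bitext mode reads one 'src ||| tgt' pair per line and splits it with boost::algorithm::split_regex, which is also why the Makefile.am hunk above adds -lboost_regex. A minimal self-contained sketch of that split (the size check is an extra guard, not in the patch):

```
#include <boost/algorithm/string/regex.hpp>
#include <boost/regex.hpp>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::string line = "das ist ein test ||| this is a test";
  std::vector<std::string> strs;
  boost::algorithm::split_regex(strs, line, boost::regex(" \\|\\|\\| "));
  if (strs.size() < 2) { std::cerr << "malformed bitext line" << std::endl; return 1; }
  std::cout << "src: " << strs[0] << "\ntgt: " << strs[1] << std::endl;
  return 0;  // compile with -lboost_regex
}
```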
@@ -310,15 +352,16 @@ main(int argc, char** argv)
if (next || stop) break;
// weights
- lambdas.init_vector(&dense_weights);
+ lambdas.init_vector(&decoder_weights);
// getting input
vector<WordID> ref_ids; // reference as vector<WordID>
if (t == 0) {
- string r_;
- getline(*refs, r_);
+ if (!read_bitext) {
+ getline(*refs, ref);
+ }
vector<string> ref_tok;
- boost::split(ref_tok, r_, boost::is_any_of(" "));
+ boost::split(ref_tok, ref, boost::is_any_of(" "));
register_and_convert(ref_tok, ref_ids);
ref_ids_buf.push_back(ref_ids);
src_str_buf.push_back(in);
@@ -348,8 +391,10 @@ main(int argc, char** argv)
}
}
- score_sum += (*samples)[0].score; // stats for 1best
- model_sum += (*samples)[0].model;
+ if (repeat == 1) {
+ score_sum += (*samples)[0].score; // stats for 1best
+ model_sum += (*samples)[0].model;
+ }
f_count += observer->get_f_count();
list_sz += observer->get_sz();
@@ -364,30 +409,74 @@ main(int argc, char** argv)
partXYX(samples, pairs, pair_threshold, max_pairs, faster_perceptron, hi_lo);
if (pair_sampling == "PRO")
PROsampling(samples, pairs, pair_threshold, max_pairs);
- npairs += pairs.size();
+ int cur_npairs = pairs.size();
+ npairs += cur_npairs;
+
+ score_t kbest_loss_first = 0.0, kbest_loss_last = 0.0;
- SparseVector<weight_t> lambdas_copy;
+ for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
+ it != pairs.end(); it++) {
+ score_t model_diff = it->first.model - it->second.model;
+ kbest_loss_first += max(0.0, -1.0 * model_diff);
+ }
+
+ for (int ki=0; ki < repeat; ki++) {
+
+ score_t kbest_loss = 0.0; // test-k-best
+ SparseVector<weight_t> lambdas_copy; // for l1 regularization
+ SparseVector<weight_t> sum_up; // for pclr
if (l1naive||l1clip||l1cumul) lambdas_copy = lambdas;
for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
it != pairs.end(); it++) {
- bool rank_error;
+ score_t model_diff = it->first.model - it->second.model;
+ if (repeat > 1) {
+ model_diff = lambdas.dot(it->first.f) - lambdas.dot(it->second.f);
+ kbest_loss += max(0.0, -1.0 * model_diff);
+ }
+ bool rank_error = false;
score_t margin;
if (faster_perceptron) { // we only consider misranked pairs
rank_error = true; // pair sampling already did this for us
margin = std::numeric_limits<float>::max();
} else {
- rank_error = it->first.model <= it->second.model;
- margin = fabs(it->first.model - it->second.model);
+ rank_error = model_diff<=0.0;
+ margin = fabs(model_diff);
if (!rank_error && margin < loss_margin) margin_violations++;
}
- if (rank_error) rank_errors++;
+ if (rank_error && ki==1) rank_errors++;
if (scale_bleu_diff) eta = it->first.score - it->second.score;
if (rank_error || margin < loss_margin) {
SparseVector<weight_t> diff_vec = it->first.f - it->second.f;
- lambdas.plus_eq_v_times_s(diff_vec, eta);
- if (gamma)
- lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs));
+ if (batch) {
+ batch_loss += max(0., -1.0*model_diff);
+ batch_updates += diff_vec;
+ continue;
+ }
+ if (pclr != "no") {
+ sum_up += diff_vec;
+ } else {
+ lambdas.plus_eq_v_times_s(diff_vec, eta);
+ if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./cur_npairs));
+ }
+ }
+ }
+
+ // per-coordinate learning rate
+ if (pclr != "no") {
+ SparseVector<weight_t>::iterator it = sum_up.begin();
+ for (; it != sum_up.end(); ++it) {
+ if (pclr == "simple") {
+ lambdas[it->first] += it->second / max(1.0, learning_rates[it->first]);
+ learning_rates[it->first]++;
+ } else if (pclr == "adagrad") {
+ if (learning_rates[it->first] == 0) {
+ lambdas[it->first] += it->second * eta;
+ } else {
+ lambdas[it->first] += it->second * eta * learning_rates[it->first];
+ }
+ learning_rates[it->first] += pow(it->second, 2.0);
+ }
}
}
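The 'simple' pclr mode divides each coordinate's step by a per-feature update count, while the 'adagrad' mode accumulates squared gradient components. For comparison, a textbook AdaGrad step (a sketch of the standard rule; the branch above implements its own variant) looks like this, with Sparse as a stand-in for SparseVector<weight_t>:

```
#include <cmath>
#include <map>

typedef std::map<int, double> Sparse;  // feature id -> value

// One AdaGrad step: G accumulates squared gradient components, and each
// coordinate's step is eta scaled by 1/sqrt(G_i).
void adagrad_step(Sparse& lambdas, Sparse& G, const Sparse& grad, double eta) {
  for (Sparse::const_iterator it = grad.begin(); it != grad.end(); ++it) {
    G[it->first] += it->second * it->second;
    lambdas[it->first] += eta * it->second / std::sqrt(G[it->first] + 1e-8);
  }
}

int main() {
  Sparse lambdas, G, grad;
  grad[0] = 1.0; grad[7] = -0.5;       // sparse gradient from one misranked pair
  adagrad_step(lambdas, G, grad, 0.1);
  return 0;
}
```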
@@ -395,14 +484,16 @@ main(int argc, char** argv)
// please note that these regularizations happen
// after a _sentence_ -- not after each example/pair!
if (l1naive) {
- FastSparseVector<weight_t>::iterator it = lambdas.begin();
+ SparseVector<weight_t>::iterator it = lambdas.begin();
for (; it != lambdas.end(); ++it) {
if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
+ it->second *= max(0.0000001, eta/(eta+learning_rates[it->first])); // FIXME
+ learning_rates[it->first]++;
it->second -= sign(it->second) * l1_reg;
}
}
} else if (l1clip) {
- FastSparseVector<weight_t>::iterator it = lambdas.begin();
+ SparseVector<weight_t>::iterator it = lambdas.begin();
for (; it != lambdas.end(); ++it) {
if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
if (it->second != 0) {
@@ -417,7 +508,7 @@ main(int argc, char** argv)
}
} else if (l1cumul) {
weight_t acc_penalty = (ii+1) * l1_reg; // ii is the index of the current input
- FastSparseVector<weight_t>::iterator it = lambdas.begin();
+ SparseVector<weight_t>::iterator it = lambdas.begin();
for (; it != lambdas.end(); ++it) {
if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
if (it->second != 0) {
@@ -435,7 +526,28 @@ main(int argc, char** argv)
}
}
- }
+ if (ki==repeat-1) { // done
+ kbest_loss_last = kbest_loss;
+ if (repeat > 1) {
+ score_t best_score = -1.;
+ score_t best_model = -std::numeric_limits<score_t>::max();
+ unsigned best_idx = 0;
+ for (unsigned i=0; i < samples->size(); i++) {
+ score_t s = lambdas.dot((*samples)[i].f);
+ if (s > best_model) {
+ best_idx = i;
+ best_model = s;
+ }
+ }
+ score_sum += (*samples)[best_idx].score;
+ model_sum += best_model;
+ }
+ }
+ } // repeat
+
+ if ((kbest_loss_first - kbest_loss_last) >= 0) kbest_loss_improve++;
+
+ } // noup
if (rescale) lambdas /= lambdas.l2norm();
@@ -443,14 +555,19 @@ main(int argc, char** argv)
} // input loop
- if (average) w_average += lambdas;
+ if (t == 0) in_sz = ii; // remember size of input (# lines)
- if (scorer_str == "approx_bleu" || scorer_str == "lc_bleu") scorer->Reset();
- if (t == 0) {
- in_sz = ii; // remember size of input (# lines)
+ if (batch) {
+ lambdas.plus_eq_v_times_s(batch_updates, eta);
+ if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs));
+ batch_updates.clear();
}
+ if (average) w_average += lambdas;
+
+ if (scorer_str == "approx_bleu" || scorer_str == "lc_bleu") scorer->Reset();
+
// print some stats
score_t score_avg = score_sum/(score_t)in_sz;
score_t model_avg = model_sum/(score_t)in_sz;
@@ -477,13 +594,15 @@ main(int argc, char** argv)
cerr << _np << " 1best avg model score: " << model_avg;
cerr << _p << " (" << model_diff << ")" << endl;
cerr << " avg # pairs: ";
- cerr << _np << npairs/(float)in_sz;
+ cerr << _np << npairs/(float)in_sz << endl;
+ cerr << " avg # rank err: ";
+ cerr << rank_errors/(float)in_sz;
if (faster_perceptron) cerr << " (meaningless)";
cerr << endl;
- cerr << " avg # rank err: ";
- cerr << rank_errors/(float)in_sz << endl;
cerr << " avg # margin viol: ";
cerr << margin_violations/(float)in_sz << endl;
+ if (batch) cerr << " batch loss: " << batch_loss << endl;
+ cerr << " k-best loss imp: " << ((float)kbest_loss_improve/in_sz)*100 << "%" << endl;
cerr << " non0 feature count: " << nonz << endl;
cerr << " avg list sz: " << list_sz/(float)in_sz << endl;
cerr << " avg f count: " << f_count/(float)list_sz << endl;
@@ -510,9 +629,9 @@ main(int argc, char** argv)
// write weights to file
if (select_weights == "best" || keep) {
- lambdas.init_vector(&dense_weights);
+ lambdas.init_vector(&decoder_weights);
string w_fn = "weights." + boost::lexical_cast<string>(t) + ".gz";
- Weights::WriteToFile(w_fn, dense_weights, true);
+ Weights::WriteToFile(w_fn, decoder_weights, true);
}
} // outer loop
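For reference, the core update dtrain performs on each sampled pair is a perceptron step on the feature difference of the two hypotheses; a minimal sketch (the real code layers loss margins, SVM regularization, pclr, and batch mode on top, and Feats is a stand-in for SparseVector<weight_t>):

```
#include <map>
#include <string>

typedef std::map<std::string, double> Feats;

// Perceptron step on a misranked pair: lambdas += eta * (f_good - f_bad).
void pair_update(Feats& lambdas, const Feats& f_good, const Feats& f_bad, double eta) {
  Feats diff = f_good;
  for (Feats::const_iterator it = f_bad.begin(); it != f_bad.end(); ++it)
    diff[it->first] -= it->second;
  for (Feats::const_iterator it = diff.begin(); it != diff.end(); ++it)
    lambdas[it->first] += eta * it->second;
}

int main() {
  Feats lambdas, good, bad;
  good["LanguageModel"] = 2.0; bad["LanguageModel"] = 1.0; bad["PassThrough"] = 1.0;
  pair_update(lambdas, good, bad, 0.1);  // LanguageModel +0.1, PassThrough -0.1
  return 0;
}
```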
diff --git a/training/dtrain/dtrain.h b/training/dtrain/dtrain.h
index 3981fb39..ccb5ad4d 100644
--- a/training/dtrain/dtrain.h
+++ b/training/dtrain/dtrain.h
@@ -9,6 +9,8 @@
#include <string.h>
#include <boost/algorithm/string.hpp>
+#include <boost/regex.hpp>
+#include <boost/algorithm/string/regex.hpp>
#include <boost/program_options.hpp>
#include "decoder.h"
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 23e94285..fc83f08e 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,5 +1,6 @@
-input=./nc-wmt11.de.gz
-refs=./nc-wmt11.en.gz
+#input=./nc-wmt11.de.gz
+#refs=./nc-wmt11.en.gz
+bitext=./nc-wmt11.gz
output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
select_weights=VOID # output average (over epochs) weight vector
decoder_config=./cdec.ini # config for cdec
@@ -10,11 +11,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
stop_after=10 # stop epoch after 10 inputs
# interesting stuff
-epochs=2 # run over input 2 times
+epochs=3 # run over input 3 times
k=100 # use 100best lists
N=4 # optimize (approx) BLEU4
scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=1.0 # learning rate, don't care if gamma=0 (perceptron)
+learning_rate=0.1 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
gamma=0 # use SVM reg
sample_from=kbest # use kbest lists (as opposed to forest)
filter=uniq # only unique entries in kbest (surface form)
@@ -22,3 +23,5 @@ pair_sampling=XYX #
hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
pair_threshold=0 # minimum distance in BLEU (here: > 0)
loss_margin=0 # update if correctly ranked, but within this margin
+repeat=1 # repeat training on a kbest list 1 time
+#batch=true # batch tuning, update after accumulating over all sentences and all kbest lists
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index 21f91244..75f47337 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,17 +4,18 @@ Reading ./nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
Example feature: Shape_S00000_T00000
-Seeding random number sequence to 970626287
+Seeding random number sequence to 3751911392
dtrain
Parameters:
k 100
N 4
- T 2
+ T 3
+ batch 0
scorer 'fixed_stupid_bleu'
sample from 'kbest'
filter 'uniq'
- learning rate 1
+ learning rate 0.1
gamma 0
loss margin 0
faster perceptron 1
@@ -23,69 +24,99 @@ Parameters:
pair threshold 0
select weights 'VOID'
l1 reg 0 'none'
+ pclr no
max pairs 4294967295
+ repeat 1
cdec cfg './cdec.ini'
- input './nc-wmt11.de.gz'
- refs './nc-wmt11.en.gz'
+ input './nc-wmt11.gz'
output '-'
stop_after 10
(a dot represents 10 inputs)
-Iteration #1 of 2.
+Iteration #1 of 3.
. 10
Stopping after 10 input sentences.
WEIGHTS
- Glue = -614
- WordPenalty = +1256.8
- LanguageModel = +5610.5
- LanguageModel_OOV = -1449
- PhraseModel_0 = -2107
- PhraseModel_1 = -4666.1
- PhraseModel_2 = -2713.5
- PhraseModel_3 = +4204.3
- PhraseModel_4 = -1435.8
- PhraseModel_5 = +916
- PhraseModel_6 = +190
- PassThrough = -2527
+ Glue = -110
+ WordPenalty = -8.2082
+ LanguageModel = -319.91
+ LanguageModel_OOV = -19.2
+ PhraseModel_0 = +312.82
+ PhraseModel_1 = -161.02
+ PhraseModel_2 = -433.65
+ PhraseModel_3 = +291.03
+ PhraseModel_4 = +252.32
+ PhraseModel_5 = +50.6
+ PhraseModel_6 = +146.7
+ PassThrough = -38.7
---
- 1best avg score: 0.17874 (+0.17874)
- 1best avg model score: 88399 (+88399)
- avg # pairs: 798.2 (meaningless)
- avg # rank err: 798.2
+ 1best avg score: 0.16966 (+0.16966)
+ 1best avg model score: 29874 (+29874)
+ avg # pairs: 906.3
+ avg # rank err: 0 (meaningless)
avg # margin viol: 0
- non0 feature count: 887
+ k-best loss imp: 100%
+ non0 feature count: 832
avg list sz: 91.3
- avg f count: 126.85
-(time 0.33 min, 2 s/S)
+ avg f count: 139.77
+(time 0.35 min, 2.1 s/S)
-Iteration #2 of 2.
+Iteration #2 of 3.
. 10
WEIGHTS
- Glue = -1025
- WordPenalty = +1751.5
- LanguageModel = +10059
- LanguageModel_OOV = -4490
- PhraseModel_0 = -2640.7
- PhraseModel_1 = -3757.4
- PhraseModel_2 = -1133.1
- PhraseModel_3 = +1837.3
- PhraseModel_4 = -3534.3
- PhraseModel_5 = +2308
- PhraseModel_6 = +1677
- PassThrough = -6222
+ Glue = -122.1
+ WordPenalty = +83.689
+ LanguageModel = +233.23
+ LanguageModel_OOV = -145.1
+ PhraseModel_0 = +150.72
+ PhraseModel_1 = -272.84
+ PhraseModel_2 = -418.36
+ PhraseModel_3 = +181.63
+ PhraseModel_4 = -289.47
+ PhraseModel_5 = +140.3
+ PhraseModel_6 = +3.5
+ PassThrough = -109.7
---
- 1best avg score: 0.30764 (+0.12891)
- 1best avg model score: -2.5042e+05 (-3.3882e+05)
- avg # pairs: 725.9 (meaningless)
- avg # rank err: 725.9
+ 1best avg score: 0.17399 (+0.004325)
+ 1best avg model score: 4936.9 (-24937)
+ avg # pairs: 662.4
+ avg # rank err: 0 (meaningless)
avg # margin viol: 0
- non0 feature count: 1499
+ k-best loss imp: 100%
+ non0 feature count: 1240
avg list sz: 91.3
- avg f count: 114.34
-(time 0.32 min, 1.9 s/S)
+ avg f count: 125.11
+(time 0.27 min, 1.6 s/S)
+
+Iteration #3 of 3.
+ . 10
+WEIGHTS
+ Glue = -157.4
+ WordPenalty = -1.7372
+ LanguageModel = +686.18
+ LanguageModel_OOV = -399.7
+ PhraseModel_0 = -39.876
+ PhraseModel_1 = -341.96
+ PhraseModel_2 = -318.67
+ PhraseModel_3 = +105.08
+ PhraseModel_4 = -290.27
+ PhraseModel_5 = -48.6
+ PhraseModel_6 = -43.6
+ PassThrough = -298.5
+ ---
+ 1best avg score: 0.30742 (+0.13343)
+ 1best avg model score: -15393 (-20329)
+ avg # pairs: 623.8
+ avg # rank err: 0 (meaningless)
+ avg # margin viol: 0
+ k-best loss imp: 100%
+ non0 feature count: 1776
+ avg list sz: 91.3
+ avg f count: 118.58
+(time 0.28 min, 1.7 s/S)
Writing weights file to '-' ...
done
---
-Best iteration: 2 [SCORE 'fixed_stupid_bleu'=0.30764].
-This took 0.65 min.
+Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.30742].
+This took 0.9 min.
diff --git a/training/dtrain/examples/standard/nc-wmt11.gz b/training/dtrain/examples/standard/nc-wmt11.gz
new file mode 100644
index 00000000..c39c5aef
--- /dev/null
+++ b/training/dtrain/examples/standard/nc-wmt11.gz
Binary files differ
diff --git a/training/dtrain/parallelize.rb b/training/dtrain/parallelize.rb
index 285f3c9b..60ca9422 100755
--- a/training/dtrain/parallelize.rb
+++ b/training/dtrain/parallelize.rb
@@ -21,6 +21,8 @@ opts = Trollop::options do
opt :qsub, "use qsub", :type => :bool, :default => false
opt :dtrain_binary, "path to dtrain binary", :type => :string
opt :extra_qsub, "extra qsub args", :type => :string, :default => ""
+ opt :per_shard_decoder_configs, "give special decoder config per shard", :type => :string, :short => '-o'
+ opt :first_input_weights, "input weights for first iter", :type => :string, :default => '', :short => '-w'
end
usage if not opts[:config]&&opts[:shards]&&opts[:input]&&opts[:references]
@@ -41,9 +43,11 @@ epochs = opts[:epochs]
rand = opts[:randomize]
reshard = opts[:reshard]
predefined_shards = false
+per_shard_decoder_configs = false
if opts[:shards] == 0
predefined_shards = true
num_shards = 0
+ per_shard_decoder_configs = true if opts[:per_shard_decoder_configs]
else
num_shards = opts[:shards]
end
@@ -51,6 +55,7 @@ input = opts[:input]
refs = opts[:references]
use_qsub = opts[:qsub]
shards_at_once = opts[:processes_at_once]
+first_input_weights = opts[:first_input_weights]
`mkdir work`
@@ -101,6 +106,9 @@ refs_files = []
if predefined_shards
input_files = File.new(input).readlines.map {|i| i.strip }
refs_files = File.new(refs).readlines.map {|i| i.strip }
+ if per_shard_decoder_configs
+ decoder_configs = File.new(opts[:per_shard_decoder_configs]).readlines.map {|i| i.strip}
+ end
num_shards = input_files.size
else
input_files, refs_files = make_shards input, refs, num_shards, 0, rand
@@ -126,10 +134,18 @@ end
else
local_end = "2>work/out.#{shard}.#{epoch}"
end
+ if per_shard_decoder_configs
+ cdec_cfg = "--decoder_config #{decoder_configs[shard]}"
+ else
+ cdec_cfg = ""
+ end
+ if first_input_weights!='' && epoch == 0
+ input_weights = "--input_weights #{first_input_weights}"
+ end
pids << Kernel.fork {
- `#{qsub_str_start}#{dtrain_bin} -c #{ini}\
+ `#{qsub_str_start}#{dtrain_bin} -c #{ini} #{cdec_cfg} #{input_weights}\
--input #{input_files[shard]}\
- --refs #{refs_files[shard]} #{input_weights}\
+ --refs #{refs_files[shard]}\
--output work/weights.#{shard}.#{epoch}#{qsub_str_end} #{local_end}`
}
weights_files << "work/weights.#{shard}.#{epoch}"
diff --git a/training/latent_svm/latent_svm.cc b/training/latent_svm/latent_svm.cc
index ab9c1d5d..60e52550 100644
--- a/training/latent_svm/latent_svm.cc
+++ b/training/latent_svm/latent_svm.cc
@@ -32,7 +32,6 @@ total_loss and prev_loss actually refer not to loss, but the metric (usually BLE
#include "sampler.h"
using namespace std;
-using boost::shared_ptr;
namespace po = boost::program_options;
bool invert_score;
@@ -128,7 +127,7 @@ struct HypothesisInfo {
};
struct GoodOracle {
- shared_ptr<HypothesisInfo> good;
+ boost::shared_ptr<HypothesisInfo> good;
};
struct TrainingObserver : public DecoderObserver {
@@ -143,9 +142,9 @@ struct TrainingObserver : public DecoderObserver {
const DocScorer& ds;
const vector<weight_t>& feature_weights;
vector<GoodOracle>& oracles;
- shared_ptr<HypothesisInfo> cur_best;
- shared_ptr<HypothesisInfo> cur_costaug_best;
- shared_ptr<HypothesisInfo> cur_ref;
+ boost::shared_ptr<HypothesisInfo> cur_best;
+ boost::shared_ptr<HypothesisInfo> cur_costaug_best;
+ boost::shared_ptr<HypothesisInfo> cur_ref;
const int kbest_size;
const double mt_metric_scale;
const double mu;
@@ -168,8 +167,8 @@ struct TrainingObserver : public DecoderObserver {
UpdateOracles(smeta.GetSentenceID(), *hg);
}
- shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double metric) {
- shared_ptr<HypothesisInfo> h(new HypothesisInfo);
+ boost::shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double metric) {
+ boost::shared_ptr<HypothesisInfo> h(new HypothesisInfo);
h->features = feats;
h->mt_metric_score = metric;
return h;
diff --git a/training/mira/kbest_cut_mira.cc b/training/mira/kbest_cut_mira.cc
index 59fa860a..990609d7 100644
--- a/training/mira/kbest_cut_mira.cc
+++ b/training/mira/kbest_cut_mira.cc
@@ -30,7 +30,6 @@
#include "sparse_vector.h"
using namespace std;
-using boost::shared_ptr;
namespace po = boost::program_options;
bool invert_score;
@@ -50,13 +49,6 @@ bool sent_approx;
bool checkloss;
bool stream;
-void SanityCheck(const vector<double>& w) {
- for (int i = 0; i < w.size(); ++i) {
- assert(!isnan(w[i]));
- assert(!isinf(w[i]));
- }
-}
-
struct FComp {
const vector<double>& w_;
FComp(const vector<double>& w) : w_(w) {}
@@ -149,7 +141,7 @@ struct HypothesisInfo {
double alpha;
double oracle_loss;
SparseVector<double> oracle_feat_diff;
- shared_ptr<HypothesisInfo> oracleN;
+ boost::shared_ptr<HypothesisInfo> oracleN;
};
bool ApproxEqual(double a, double b) {
@@ -157,7 +149,7 @@ bool ApproxEqual(double a, double b) {
return (fabs(a-b)/fabs(b)) < EPSILON;
}
-typedef shared_ptr<HypothesisInfo> HI;
+typedef boost::shared_ptr<HypothesisInfo> HI;
bool HypothesisCompareB(const HI& h1, const HI& h2 )
{
return h1->mt_metric > h2->mt_metric;
@@ -185,11 +177,11 @@ bool HypothesisCompareG(const HI& h1, const HI& h2 )
};
-void CuttingPlane(vector<shared_ptr<HypothesisInfo> >* cur_c, bool* again, vector<shared_ptr<HypothesisInfo> >& all_hyp, vector<weight_t> dense_weights)
+void CuttingPlane(vector<boost::shared_ptr<HypothesisInfo> >* cur_c, bool* again, vector<boost::shared_ptr<HypothesisInfo> >& all_hyp, vector<weight_t> dense_weights)
{
bool DEBUG_CUT = false;
- shared_ptr<HypothesisInfo> max_fear, max_fear_in_set;
- vector<shared_ptr<HypothesisInfo> >& cur_constraint = *cur_c;
+ boost::shared_ptr<HypothesisInfo> max_fear, max_fear_in_set;
+ vector<boost::shared_ptr<HypothesisInfo> >& cur_constraint = *cur_c;
if(no_reweight)
{
@@ -235,9 +227,9 @@ void CuttingPlane(vector<shared_ptr<HypothesisInfo> >* cur_c, bool* again, vecto
}
-double ComputeDelta(vector<shared_ptr<HypothesisInfo> >* cur_p, double max_step_size,vector<weight_t> dense_weights )
+double ComputeDelta(vector<boost::shared_ptr<HypothesisInfo> >* cur_p, double max_step_size,vector<weight_t> dense_weights )
{
- vector<shared_ptr<HypothesisInfo> >& cur_pair = *cur_p;
+ vector<boost::shared_ptr<HypothesisInfo> >& cur_pair = *cur_p;
double loss = cur_pair[0]->oracle_loss - cur_pair[1]->oracle_loss;
double margin = -(cur_pair[0]->oracleN->features.dot(dense_weights)- cur_pair[0]->features.dot(dense_weights)) + (cur_pair[1]->oracleN->features.dot(dense_weights) - cur_pair[1]->features.dot(dense_weights));
@@ -261,12 +253,12 @@ double ComputeDelta(vector<shared_ptr<HypothesisInfo> >* cur_p, double max_step_
}
-vector<shared_ptr<HypothesisInfo> > SelectPair(vector<shared_ptr<HypothesisInfo> >* cur_c)
+vector<boost::shared_ptr<HypothesisInfo> > SelectPair(vector<boost::shared_ptr<HypothesisInfo> >* cur_c)
{
bool DEBUG_SELECT= false;
- vector<shared_ptr<HypothesisInfo> >& cur_constraint = *cur_c;
+ vector<boost::shared_ptr<HypothesisInfo> >& cur_constraint = *cur_c;
- vector<shared_ptr<HypothesisInfo> > pair;
+ vector<boost::shared_ptr<HypothesisInfo> > pair;
if (no_select || optimizer == 2){ //skip heuristic search and return oracle and fear for pa-mira
@@ -278,7 +270,7 @@ vector<shared_ptr<HypothesisInfo> > SelectPair(vector<shared_ptr<HypothesisInfo>
for(int u=0;u != cur_constraint.size();u++)
{
- shared_ptr<HypothesisInfo> max_fear;
+ boost::shared_ptr<HypothesisInfo> max_fear;
if(DEBUG_SELECT) cerr<< "cur alpha " << u << " " << cur_constraint[u]->alpha;
for(int i=0; i < cur_constraint.size();i++) //select maximal violator
@@ -323,8 +315,8 @@ vector<shared_ptr<HypothesisInfo> > SelectPair(vector<shared_ptr<HypothesisInfo>
}
struct GoodBadOracle {
- vector<shared_ptr<HypothesisInfo> > good;
- vector<shared_ptr<HypothesisInfo> > bad;
+ vector<boost::shared_ptr<HypothesisInfo> > good;
+ vector<boost::shared_ptr<HypothesisInfo> > bad;
};
struct BasicObserver: public DecoderObserver {
@@ -367,8 +359,8 @@ struct TrainingObserver : public DecoderObserver {
const DocScorer& ds;
vector<ScoreP>& corpus_bleu_sent_stats;
vector<GoodBadOracle>& oracles;
- vector<shared_ptr<HypothesisInfo> > cur_best;
- shared_ptr<HypothesisInfo> cur_oracle;
+ vector<boost::shared_ptr<HypothesisInfo> > cur_best;
+ boost::shared_ptr<HypothesisInfo> cur_oracle;
const int kbest_size;
Hypergraph forest;
int cur_sent;
@@ -386,7 +378,7 @@ struct TrainingObserver : public DecoderObserver {
return *cur_best[0];
}
- const vector<shared_ptr<HypothesisInfo> > GetCurrentBest() const {
+ const vector<boost::shared_ptr<HypothesisInfo> > GetCurrentBest() const {
return cur_best;
}
@@ -411,8 +403,8 @@ struct TrainingObserver : public DecoderObserver {
}
- shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score, const vector<WordID>& hyp) {
- shared_ptr<HypothesisInfo> h(new HypothesisInfo);
+ boost::shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score, const vector<WordID>& hyp) {
+ boost::shared_ptr<HypothesisInfo> h(new HypothesisInfo);
h->features = feats;
h->mt_metric = score;
h->hyp = hyp;
@@ -424,14 +416,14 @@ struct TrainingObserver : public DecoderObserver {
if (stream) sent_id = 0;
bool PRINT_LIST= false;
- vector<shared_ptr<HypothesisInfo> >& cur_good = oracles[sent_id].good;
- vector<shared_ptr<HypothesisInfo> >& cur_bad = oracles[sent_id].bad;
+ vector<boost::shared_ptr<HypothesisInfo> >& cur_good = oracles[sent_id].good;
+ vector<boost::shared_ptr<HypothesisInfo> >& cur_bad = oracles[sent_id].bad;
//TODO: look at keeping previous iterations hypothesis lists around
cur_best.clear();
cur_good.clear();
cur_bad.clear();
- vector<shared_ptr<HypothesisInfo> > all_hyp;
+ vector<boost::shared_ptr<HypothesisInfo> > all_hyp;
typedef KBest::KBestDerivations<vector<WordID>, ESentenceTraversal,Filter> K;
K kbest(forest,kbest_size);
@@ -527,7 +519,7 @@ struct TrainingObserver : public DecoderObserver {
if(PRINT_LIST) { cerr << "GOOD" << endl; for(int u=0;u!=cur_good.size();u++) cerr << cur_good[u]->mt_metric << " " << cur_good[u]->hope << endl;}
//use hope for fear selection
- shared_ptr<HypothesisInfo>& oracleN = cur_good[0];
+ boost::shared_ptr<HypothesisInfo>& oracleN = cur_good[0];
if(fear_select == 1){ //compute fear hyps with model - bleu
if (PRINT_LIST) cerr << "FEAR " << endl;
@@ -663,13 +655,13 @@ int main(int argc, char** argv) {
invert_score = false;
}
- shared_ptr<DocScorer> ds;
+ boost::shared_ptr<DocScorer> ds;
//normal: load references, stream: start stream scorer
if (stream) {
- ds = shared_ptr<DocScorer>(new DocStreamScorer(type, vector<string>(0), ""));
+ ds = boost::shared_ptr<DocScorer>(new DocStreamScorer(type, vector<string>(0), ""));
cerr << "Scoring doc stream with " << metric_name << endl;
} else {
- ds = shared_ptr<DocScorer>(new DocScorer(type, conf["reference"].as<vector<string> >(), ""));
+ ds = boost::shared_ptr<DocScorer>(new DocScorer(type, conf["reference"].as<vector<string> >(), ""));
cerr << "Loaded " << ds->size() << " references for scoring with " << metric_name << endl;
}
vector<ScoreP> corpus_bleu_sent_stats;
@@ -774,9 +766,9 @@ int main(int argc, char** argv) {
const HypothesisInfo& cur_good = *oracles[cur_sent].good[0];
const HypothesisInfo& cur_bad = *oracles[cur_sent].bad[0];
- vector<shared_ptr<HypothesisInfo> >& cur_good_v = oracles[cur_sent].good;
- vector<shared_ptr<HypothesisInfo> >& cur_bad_v = oracles[cur_sent].bad;
- vector<shared_ptr<HypothesisInfo> > cur_best_v = observer.GetCurrentBest();
+ vector<boost::shared_ptr<HypothesisInfo> >& cur_good_v = oracles[cur_sent].good;
+ vector<boost::shared_ptr<HypothesisInfo> >& cur_bad_v = oracles[cur_sent].bad;
+ vector<boost::shared_ptr<HypothesisInfo> > cur_best_v = observer.GetCurrentBest();
tot_loss += cur_hyp.mt_metric;
@@ -824,13 +816,13 @@ int main(int argc, char** argv) {
}
else if(optimizer == 5) //full mira with n-best list of constraints from hope, fear, model best
{
- vector<shared_ptr<HypothesisInfo> > cur_constraint;
+ vector<boost::shared_ptr<HypothesisInfo> > cur_constraint;
cur_constraint.insert(cur_constraint.begin(), cur_bad_v.begin(), cur_bad_v.end());
cur_constraint.insert(cur_constraint.begin(), cur_best_v.begin(), cur_best_v.end());
cur_constraint.insert(cur_constraint.begin(), cur_good_v.begin(), cur_good_v.end());
bool optimize_again;
- vector<shared_ptr<HypothesisInfo> > cur_pair;
+ vector<boost::shared_ptr<HypothesisInfo> > cur_pair;
//SMO
for(int u=0;u!=cur_constraint.size();u++)
cur_constraint[u]->alpha =0;
@@ -879,7 +871,7 @@ int main(int argc, char** argv) {
else if(optimizer == 2 || optimizer == 3) //PA and Cutting Plane MIRA update
{
bool DEBUG_SMO= true;
- vector<shared_ptr<HypothesisInfo> > cur_constraint;
+ vector<boost::shared_ptr<HypothesisInfo> > cur_constraint;
cur_constraint.push_back(cur_good_v[0]); //add oracle to constraint set
bool optimize_again = true;
int cut_plane_calls = 0;
@@ -919,7 +911,7 @@ int main(int argc, char** argv) {
while (iter < smo_iter)
{
//select pair to optimize from constraint set
- vector<shared_ptr<HypothesisInfo> > cur_pair = SelectPair(&cur_constraint);
+ vector<boost::shared_ptr<HypothesisInfo> > cur_pair = SelectPair(&cur_constraint);
if(cur_pair.empty()){
iter=MAX_SMO;
diff --git a/training/mira/kbest_mira.cc b/training/mira/kbest_mira.cc
index d59b4224..2868de0c 100644
--- a/training/mira/kbest_mira.cc
+++ b/training/mira/kbest_mira.cc
@@ -3,10 +3,10 @@
#include <vector>
#include <cassert>
#include <cmath>
-#include <tr1/memory>
#include <boost/program_options.hpp>
#include <boost/program_options/variables_map.hpp>
+#include <boost/shared_ptr.hpp>
#include "stringlib.h"
#include "hg_sampler.h"
@@ -30,7 +30,7 @@ using namespace std;
namespace po = boost::program_options;
bool invert_score;
-std::tr1::shared_ptr<MT19937> rng;
+boost::shared_ptr<MT19937> rng;
void RandomPermutation(int len, vector<int>* p_ids) {
vector<int>& ids = *p_ids;
@@ -88,8 +88,8 @@ struct HypothesisInfo {
};
struct GoodBadOracle {
- std::tr1::shared_ptr<HypothesisInfo> good;
- std::tr1::shared_ptr<HypothesisInfo> bad;
+ boost::shared_ptr<HypothesisInfo> good;
+ boost::shared_ptr<HypothesisInfo> bad;
};
struct TrainingObserver : public DecoderObserver {
@@ -97,7 +97,7 @@ struct TrainingObserver : public DecoderObserver {
const DocumentScorer& ds;
const EvaluationMetric& metric;
vector<GoodBadOracle>& oracles;
- std::tr1::shared_ptr<HypothesisInfo> cur_best;
+ boost::shared_ptr<HypothesisInfo> cur_best;
const int kbest_size;
const bool sample_forest;
@@ -109,16 +109,16 @@ struct TrainingObserver : public DecoderObserver {
UpdateOracles(smeta.GetSentenceID(), *hg);
}
- std::tr1::shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score) {
- std::tr1::shared_ptr<HypothesisInfo> h(new HypothesisInfo);
+ boost::shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score) {
+ boost::shared_ptr<HypothesisInfo> h(new HypothesisInfo);
h->features = feats;
h->mt_metric = score;
return h;
}
void UpdateOracles(int sent_id, const Hypergraph& forest) {
- std::tr1::shared_ptr<HypothesisInfo>& cur_good = oracles[sent_id].good;
- std::tr1::shared_ptr<HypothesisInfo>& cur_bad = oracles[sent_id].bad;
+ boost::shared_ptr<HypothesisInfo>& cur_good = oracles[sent_id].good;
+ boost::shared_ptr<HypothesisInfo>& cur_bad = oracles[sent_id].bad;
cur_bad.reset(); // TODO get rid of??
if (sample_forest) {
diff --git a/training/mira/mira.py b/training/mira/mira.py
index 29c51e1d..d5a1d9f8 100755
--- a/training/mira/mira.py
+++ b/training/mira/mira.py
@@ -4,8 +4,19 @@ import subprocess, shlex, glob
import argparse
import logging
import random, time
-import cdec.score
import gzip, itertools
+try:
+ import cdec.score
+except ImportError:
+ sys.stderr.write('Could not import pycdec, see cdec/python/README.md for details\n')
+ sys.exit(1)
+have_mpl = True
+try:
+ import matplotlib
+ matplotlib.use('Agg')
+ import matplotlib.pyplot as plt
+except ImportError:
+ have_mpl = False
#mira run script
#requires pycdec to be built, since it is used for scoring hypotheses
@@ -16,17 +27,17 @@ import gzip, itertools
#scoring function using pycdec scoring
def fast_score(hyps, refs, metric):
scorer = cdec.score.Scorer(metric)
- logging.info('loaded {0} references for scoring with {1}\n'.format(
+ logging.info('loaded {0} references for scoring with {1}'.format(
len(refs), metric))
if metric=='BLEU':
logging.warning('BLEU is ambiguous, assuming IBM_BLEU\n')
metric = 'IBM_BLEU'
elif metric=='COMBI':
logging.warning('COMBI metric is no longer supported, switching to '
- 'COMB:TER=-0.5;BLEU=0.5\n')
+ 'COMB:TER=-0.5;BLEU=0.5')
metric = 'COMB:TER=-0.5;BLEU=0.5'
stats = sum(scorer(r).evaluate(h) for h,r in itertools.izip(hyps,refs))
- logging.info(stats.detail+'\n')
+ logging.info('Score={} ({})'.format(stats.score, stats.detail))
return stats.score
#create new parallel input file in output directory in sgml format
@@ -71,6 +82,8 @@ def main():
#set logging to write all info messages to stderr
logging.basicConfig(level=logging.INFO)
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
+ if not have_mpl:
+ logging.warning('Failed to import matplotlib, graphs will not be generated.')
parser= argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
@@ -181,10 +194,11 @@ def main():
dev_size = enseg(args.devset, newdev, args.grammar_prefix)
args.devset = newdev
- write_config(args)
+ log_config(args)
args.weights, hope_best_fear = optimize(args, script_dir, dev_size)
- graph_file = graph(args.output_dir, hope_best_fear, args.metric)
+ graph_file = ''
+ if have_mpl: graph_file = graph(args.output_dir, hope_best_fear, args.metric)
dev_results, dev_bleu = evaluate(args.devset, args.weights, args.config,
script_dir, args.output_dir)
@@ -205,17 +219,12 @@ def main():
if graph_file:
logging.info('A graph of the best/hope/fear scores over the iterations '
- 'has been saved to {}\n'.format(graph_file))
+ 'has been saved to {}'.format(graph_file))
print 'final weights:\n{}\n'.format(args.weights)
#graph of hope/best/fear metric values across all iterations
def graph(output_dir, hope_best_fear, metric):
- try:
- import matplotlib.pyplot as plt
- except ImportError:
- logging.error('Error importing matplotlib. Graphing disabled.\n')
- return ''
max_y = float(max(hope_best_fear['best']))*1.5
plt.plot(hope_best_fear['best'], label='best')
plt.plot(hope_best_fear['hope'], label='hope')
@@ -308,6 +317,7 @@ def optimize(args, script_dir, dev_size):
decoder = script_dir+'/kbest_cut_mira'
(source, refs) = split_devset(args.devset, args.output_dir)
port = random.randint(15000,50000)
+ logging.info('using port {}'.format(port))
num_features = 0
last_p_score = 0
best_score_iter = -1
@@ -316,8 +326,8 @@ def optimize(args, script_dir, dev_size):
hope_best_fear = {'hope':[],'best':[],'fear':[]}
#main optimization loop
while i<args.max_iterations:
- logging.info('\n\nITERATION {}\n========\n'.format(i))
- logging.info('using port {}\n'.format(port))
+ logging.info('======= STARTING ITERATION {} ======='.format(i))
+ logging.info('Starting at {}'.format(time.asctime()))
#iteration specific files
runfile = args.output_dir+'/run.raw.'+str(i)
@@ -327,10 +337,8 @@ def optimize(args, script_dir, dev_size):
weightdir = args.output_dir+'/weights.pass'+str(i)
os.mkdir(logdir)
os.mkdir(weightdir)
-
- logging.info('RUNNING DECODER AT {}'.format(time.asctime()))
weightsfile = args.output_dir+'/weights.'+str(i)
- logging.info('ITER {}\n'.format(i))
+ logging.info(' log directory={}'.format(logdir))
curr_pass = '0{}'.format(i)
decoder_cmd = ('{0} -c {1} -w {2} -r{3} -m {4} -s {5} -b {6} -k {7} -o {8}'
' -p {9} -O {10} -D {11} -h {12} -f {13} -C {14}').format(
@@ -350,7 +358,7 @@ def optimize(args, script_dir, dev_size):
parallelize, logdir, args.jobs)
cmd = parallel_cmd + ' ' + decoder_cmd
- logging.info('COMMAND: \n{}\n'.format(cmd))
+ logging.info('OPTIMIZATION COMMAND: {}'.format(cmd))
dlog = open(decoderlog,'w')
runf = open(runfile,'w')
@@ -365,27 +373,26 @@ def optimize(args, script_dir, dev_size):
p1.stdout.close()
if exit_code:
- logging.error('Failed with exit code {}\n'.format(exit_code))
+ logging.error('Failed with exit code {}'.format(exit_code))
sys.exit(exit_code)
try:
f = open(runfile)
except IOError, msg:
- logging.error('Unable to open {}\n'.format(runfile))
+ logging.error('Unable to open {}'.format(runfile))
sys.exit()
num_topbest = sum(1 for line in f)
f.close()
if num_topbest == dev_size: break
- logging.warning('Incorrect number of top best. '
- 'Waiting for distributed filesystem and retrying.')
+ logging.warning('Incorrect number of top best. Sleeping for 10 seconds and retrying...')
time.sleep(10)
retries += 1
if dev_size != num_topbest:
logging.error("Dev set contains "+dev_size+" sentences, but we don't "
"have topbest for all of these. Decoder failure? "
- " Check "+decoderlog+'\n')
+ " Check "+decoderlog)
sys.exit()
dlog.close()
runf.close()
@@ -427,7 +434,7 @@ def optimize(args, script_dir, dev_size):
hope_best_fear['hope'].append(dec_score)
hope_best_fear['best'].append(dec_score_h)
hope_best_fear['fear'].append(dec_score_f)
- logging.info('DECODER SCORE: {0} HOPE: {1} FEAR: {2}\n'.format(
+ logging.info('DECODER SCORE: {0} HOPE: {1} FEAR: {2}'.format(
dec_score, dec_score_h, dec_score_f))
if dec_score > best_score:
best_score_iter = i
@@ -436,12 +443,13 @@ def optimize(args, script_dir, dev_size):
new_weights_file = '{}/weights.{}'.format(args.output_dir, i+1)
last_weights_file = '{}/weights.{}'.format(args.output_dir, i)
i += 1
- weight_files = weightdir+'/weights.mira-pass*.*[0-9].gz'
+ weight_files = args.output_dir+'/weights.pass*/weights.mira-pass*[0-9].gz'
average_weights(new_weights_file, weight_files)
- logging.info('\nBEST ITER: {} :: {}\n\n'.format(
+ logging.info('BEST ITERATION: {} (SCORE={})'.format(
best_score_iter, best_score))
weights_final = args.output_dir+'/weights.final'
+ logging.info('WEIGHTS FILE: {}'.format(weights_final))
shutil.copy(last_weights_file, weights_final)
average_final_weights(args.output_dir)
@@ -481,15 +489,15 @@ def gzip_file(filename):
#average the weights for a given pass
def average_weights(new_weights, weight_files):
- logging.info('AVERAGE {} {}\n'.format(new_weights, weight_files))
+ logging.info('AVERAGE {} {}'.format(new_weights, weight_files))
feature_weights = {}
total_mult = 0.0
for path in glob.glob(weight_files):
score = gzip.open(path)
mult = 0
- logging.info('FILE {}\n'.format(path))
+ logging.info(' FILE {}'.format(path))
msg, ran, mult = score.readline().strip().split(' ||| ')
- logging.info('Processing {} {}'.format(ran, mult))
+ logging.info(' Processing {} {}'.format(ran, mult))
for line in score:
f,w = line.split(' ',1)
if f in feature_weights:
@@ -500,34 +508,30 @@ def average_weights(new_weights, weight_files):
score.close()
#write new weights to outfile
+ logging.info('Writing averaged weights to {}'.format(new_weights))
out = open(new_weights, 'w')
for f in iter(feature_weights):
avg = feature_weights[f]/total_mult
- logging.info('{} {} {} ||| Printing {} {}\n'.format(f,feature_weights[f],
- total_mult, f, avg))
out.write('{} {}\n'.format(f,avg))
-def write_config(args):
- config = ('\n'
- 'DECODER: '
- '/usr0/home/eschling/cdec/training/mira/kbest_cut_mira\n'
- 'INI FILE: '+args.config+'\n'
- 'WORKING DIRECTORY: '+args.output_dir+'\n'
- 'DEVSET: '+args.devset+'\n'
- 'EVAL METRIC: '+args.metric+'\n'
- 'MAX ITERATIONS: '+str(args.max_iterations)+'\n'
- 'DECODE NODES: '+str(args.jobs)+'\n'
- 'INITIAL WEIGHTS: '+args.weights+'\n')
+def log_config(args):
+ logging.info('WORKING DIRECTORY={}'.format(args.output_dir))
+ logging.info('INI FILE={}'.format(args.config))
+ logging.info('DEVSET={}'.format(args.devset))
+ logging.info('EVAL METRIC={}'.format(args.metric))
+ logging.info('MAX ITERATIONS={}'.format(args.max_iterations))
+ logging.info('PARALLEL JOBS={}'.format(args.jobs))
+ logging.info('INITIAL WEIGHTS={}'.format(args.weights))
if args.grammar_prefix:
- config += 'GRAMMAR PREFIX: '+str(args.grammar_prefix)+'\n'
+ logging.info('GRAMMAR PREFIX={}'.format(args.grammar_prefix))
if args.test:
- config += 'TEST SET: '+args.test+'\n'
+ logging.info('TEST SET={}'.format(args.test))
+ else:
+ logging.info('TEST SET=none specified')
if args.test_config:
- config += 'TEST CONFIG: '+args.test_config+'\n'
+ logging.info('TEST CONFIG={}'.format(args.test_config))
if args.email:
- config += 'EMAIL: '+args.email+'\n'
-
- logging.info(config)
+ logging.info('EMAIL={}'.format(args.email))
if __name__=='__main__':
main()
diff --git a/training/pro/mr_pro_map.cc b/training/pro/mr_pro_map.cc
index eef40b8a..a5e6e48f 100644
--- a/training/pro/mr_pro_map.cc
+++ b/training/pro/mr_pro_map.cc
@@ -2,7 +2,6 @@
#include <iostream>
#include <fstream>
#include <vector>
-#include <tr1/unordered_map>
#include <boost/functional/hash.hpp>
#include <boost/shared_ptr.hpp>
diff --git a/training/utils/candidate_set.cc b/training/utils/candidate_set.cc
index 087efec3..33dae9a3 100644
--- a/training/utils/candidate_set.cc
+++ b/training/utils/candidate_set.cc
@@ -1,6 +1,11 @@
#include "candidate_set.h"
-#include <tr1/unordered_set>
+#ifndef HAVE_OLD_CPP
+# include <unordered_set>
+#else
+# include <tr1/unordered_set>
+namespace std { using std::tr1::unordered_set; }
+#endif
#include <boost/functional/hash.hpp>
@@ -139,12 +144,12 @@ void CandidateSet::ReadFromFile(const string& file) {
void CandidateSet::Dedup() {
if(!SILENT) cerr << "Dedup in=" << cs.size();
- tr1::unordered_set<Candidate, CandidateHasher, CandidateCompare> u;
+ unordered_set<Candidate, CandidateHasher, CandidateCompare> u;
while(cs.size() > 0) {
u.insert(cs.back());
cs.pop_back();
}
- tr1::unordered_set<Candidate, CandidateHasher, CandidateCompare>::iterator it = u.begin();
+ unordered_set<Candidate, CandidateHasher, CandidateCompare>::iterator it = u.begin();
while (it != u.end()) {
cs.push_back(*it);
it = u.erase(it);
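The new preamble selects std::unordered_set on C++11 toolchains and aliases the tr1 container into std otherwise (HAVE_OLD_CPP is assumed to come from the configure machinery), so the rest of the file can name the type uniformly. A small standalone sketch of the pattern:

```
#ifndef HAVE_OLD_CPP
# include <unordered_set>
#else
# include <tr1/unordered_set>
namespace std { using std::tr1::unordered_set; }
#endif
#include <iostream>

int main() {
  std::unordered_set<int> u;               // same spelling on both toolchains
  u.insert(1); u.insert(1); u.insert(2);
  std::cout << u.size() << std::endl;      // 2 -- duplicates removed, as in Dedup()
  return 0;
}
```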
diff --git a/training/utils/online_optimizer.h b/training/utils/online_optimizer.h
index 28d89344..19223e9d 100644
--- a/training/utils/online_optimizer.h
+++ b/training/utils/online_optimizer.h
@@ -1,10 +1,10 @@
#ifndef _ONL_OPTIMIZE_H_
#define _ONL_OPTIMIZE_H_
-#include <tr1/memory>
#include <set>
#include <string>
#include <cmath>
+#include <boost/shared_ptr.hpp>
#include "sparse_vector.h"
struct LearningRateSchedule {
@@ -56,7 +56,7 @@ struct ExponentialDecayLearningRate : public LearningRateSchedule {
class OnlineOptimizer {
public:
virtual ~OnlineOptimizer();
- OnlineOptimizer(const std::tr1::shared_ptr<LearningRateSchedule>& s,
+ OnlineOptimizer(const boost::shared_ptr<LearningRateSchedule>& s,
size_t batch_size,
const std::vector<int>& frozen_feats = std::vector<int>())
: N_(batch_size),schedule_(s),k_() {
@@ -77,13 +77,13 @@ class OnlineOptimizer {
std::set<int> frozen_; // frozen (non-optimizing) features
private:
- std::tr1::shared_ptr<LearningRateSchedule> schedule_;
+ boost::shared_ptr<LearningRateSchedule> schedule_;
int k_; // iteration count
};
class CumulativeL1OnlineOptimizer : public OnlineOptimizer {
public:
- CumulativeL1OnlineOptimizer(const std::tr1::shared_ptr<LearningRateSchedule>& s,
+ CumulativeL1OnlineOptimizer(const boost::shared_ptr<LearningRateSchedule>& s,
size_t training_instances, double C,
const std::vector<int>& frozen) :
OnlineOptimizer(s, training_instances, frozen), C_(C), u_() {}
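OnlineOptimizer takes a LearningRateSchedule whose eta(k) decays as the iteration count k grows. As an illustration of the API shape only (the decay formula below is an assumption for the sketch, not cdec's exact one):

```
#include <cassert>
#include <cmath>
#include <cstddef>

// Hypothetical decaying schedule mirroring the eta(k) interface above.
struct ToyExponentialDecay {
  std::size_t N;        // batch size
  double eta0, alpha;   // initial rate, decay factor in (0,1]
  double eta(int k) const { return eta0 * std::pow(alpha, double(k) / N); }
};

int main() {
  ToyExponentialDecay r = { 20, 0.2, 0.85 };
  assert(r.eta(10) < r.eta(1));  // rate decays, as optimize_test.cc checks
  return 0;
}
```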
diff --git a/training/utils/optimize_test.cc b/training/utils/optimize_test.cc
index bff2ca03..72fcef6d 100644
--- a/training/utils/optimize_test.cc
+++ b/training/utils/optimize_test.cc
@@ -2,6 +2,7 @@
#include <iostream>
#include <sstream>
#include <boost/program_options/variables_map.hpp>
+#include <boost/shared_ptr.hpp>
#include "optimize.h"
#include "online_optimizer.h"
#include "sparse_vector.h"
@@ -96,14 +97,11 @@ void TestOptimizerVariants(int num_vars) {
cerr << oa.Name() << " SUCCESS\n";
}
-using namespace std::tr1;
-
void TestOnline() {
size_t N = 20;
double C = 1.0;
double eta0 = 0.2;
- std::tr1::shared_ptr<LearningRateSchedule> r(new ExponentialDecayLearningRate(N, eta0, 0.85));
- //shared_ptr<LearningRateSchedule> r(new StandardLearningRate(N, eta0));
+ boost::shared_ptr<LearningRateSchedule> r(new ExponentialDecayLearningRate(N, eta0, 0.85));
CumulativeL1OnlineOptimizer opt(r, N, C, std::vector<int>());
assert(r->eta(10) < r->eta(1));
}