path: root/training
Diffstat (limited to 'training')
-rw-r--r--  training/dtrain/CMakeLists.txt | 8
-rw-r--r--  training/dtrain/README.md | 43
-rw-r--r--  training/dtrain/dtrain.cc | 956
-rw-r--r--  training/dtrain/dtrain.h | 203
-rw-r--r--  training/dtrain/examples/parallelized/README | 5
-rw-r--r--  training/dtrain/examples/parallelized/cdec.ini | 22
-rw-r--r--  training/dtrain/examples/parallelized/dtrain.ini | 14
-rw-r--r--  training/dtrain/examples/parallelized/grammar/grammar.out.0.gz | bin 8318 -> 0 bytes
-rw-r--r--  training/dtrain/examples/parallelized/grammar/grammar.out.1.gz | bin 358560 -> 0 bytes
-rw-r--r--  training/dtrain/examples/parallelized/grammar/grammar.out.2.gz | bin 1014466 -> 0 bytes
-rw-r--r--  training/dtrain/examples/parallelized/grammar/grammar.out.3.gz | bin 391811 -> 0 bytes
-rw-r--r--  training/dtrain/examples/parallelized/grammar/grammar.out.4.gz | bin 149590 -> 0 bytes
-rw-r--r--  training/dtrain/examples/parallelized/grammar/grammar.out.5.gz | bin 537024 -> 0 bytes
-rw-r--r--  training/dtrain/examples/parallelized/grammar/grammar.out.6.gz | bin 291286 -> 0 bytes
-rw-r--r--  training/dtrain/examples/parallelized/grammar/grammar.out.7.gz | bin 1038140 -> 0 bytes
-rw-r--r--  training/dtrain/examples/parallelized/grammar/grammar.out.8.gz | bin 419889 -> 0 bytes
-rw-r--r--  training/dtrain/examples/parallelized/grammar/grammar.out.9.gz | bin 409140 -> 0 bytes
-rw-r--r--  training/dtrain/examples/parallelized/in | 10
-rw-r--r--  training/dtrain/examples/parallelized/refs | 10
-rw-r--r--  training/dtrain/examples/parallelized/work/out.0.0 | 62
-rw-r--r--  training/dtrain/examples/parallelized/work/out.0.1 | 63
-rw-r--r--  training/dtrain/examples/parallelized/work/out.1.0 | 62
-rw-r--r--  training/dtrain/examples/parallelized/work/out.1.1 | 63
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.0.0.in | 5
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.0.0.refs | 5
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.1.0.in | 5
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.1.0.refs | 5
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.0 | 12
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.0.0 | 12
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.0.1 | 12
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.1 | 12
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.1.0 | 11
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.1.1 | 12
-rw-r--r--  training/dtrain/examples/standard/README | 2
-rw-r--r--  training/dtrain/examples/standard/cdec.ini | 27
-rw-r--r--  training/dtrain/examples/standard/dtrain.ini | 27
-rw-r--r--  training/dtrain/examples/standard/expected-output | 123
-rw-r--r--  training/dtrain/examples/standard/nc-wmt11.de.gz | bin 58324 -> 0 bytes
-rw-r--r--  training/dtrain/examples/standard/nc-wmt11.en.gz | bin 49600 -> 0 bytes
-rw-r--r--  training/dtrain/examples/standard/nc-wmt11.en.srilm.gz | bin 16017291 -> 0 bytes
-rw-r--r--  training/dtrain/examples/standard/nc-wmt11.grammar.gz | bin 1399924 -> 0 bytes
-rw-r--r--  training/dtrain/examples/standard/nc-wmt11.gz | bin 113504 -> 0 bytes
-rw-r--r--  training/dtrain/examples/toy/cdec.ini | 4
-rw-r--r--  training/dtrain/examples/toy/dtrain.ini | 13
-rw-r--r--  training/dtrain/examples/toy/expected-output | 77
-rw-r--r--  training/dtrain/examples/toy/grammar.gz | bin 219 -> 0 bytes
-rw-r--r--  training/dtrain/examples/toy/src | 2
-rw-r--r--  training/dtrain/examples/toy/tgt | 2
-rw-r--r--  training/dtrain/kbestget.h | 88
-rw-r--r--  training/dtrain/ksampler.h | 60
-rwxr-xr-x  training/dtrain/lplp.rb | 35
-rw-r--r--  training/dtrain/pairsampling.h | 141
-rwxr-xr-x  training/dtrain/parallelize.rb | 177
-rw-r--r--  training/dtrain/sample.h | 131
-rw-r--r--  training/dtrain/score.cc | 283
-rw-r--r--  training/dtrain/score.h | 509
-rw-r--r--  training/dtrain/update.h | 235
-rw-r--r--  training/mira/kbest_cut_mira.cc | 26
-rw-r--r--  training/mira/kbest_mira.cc | 8
-rwxr-xr-x  training/pro/pro.pl | 1
60 files changed, 1378 insertions(+), 2205 deletions(-)
diff --git a/training/dtrain/CMakeLists.txt b/training/dtrain/CMakeLists.txt
index 027c80e4..eac7fc72 100644
--- a/training/dtrain/CMakeLists.txt
+++ b/training/dtrain/CMakeLists.txt
@@ -5,12 +5,10 @@ INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../../decoder)
set(dtrain_SRCS
dtrain.cc
- score.cc
dtrain.h
- kbestget.h
- ksampler.h
- pairsampling.h
- score.h)
+ sample.h
+ score.h
+ update.h)
add_executable(dtrain ${dtrain_SRCS})
target_link_libraries(dtrain libcdec ksearch mteval utils klm klm_util klm_util_double ${Boost_LIBRARIES} ${ZLIB_LIBRARIES} ${BZIP2_LIBRARIES} ${LIBLZMA_LIBRARIES} ${LIBDL_LIBRARIES})
diff --git a/training/dtrain/README.md b/training/dtrain/README.md
index aa1ab3e7..dc473568 100644
--- a/training/dtrain/README.md
+++ b/training/dtrain/README.md
@@ -1,35 +1,46 @@
This is a simple (and parallelizable) tuning method for cdec
-which is able to train the weights of very many (sparse) features
-on the training set.
+which enables training weights of very many (sparse) features
+on the full training set.
-It was used in these papers:
+Please cite as:
> "Joint Feature Selection in Distributed Stochastic
> Learning for Large-Scale Discriminative Training in
> SMT" (Simianer, Riezler, Dyer; ACL 2012)
>
-> "Multi-Task Learning for Improved Discriminative
-> Training in SMT" (Simianer, Riezler; WMT 2013)
->
-
Building
--------
-Builds when building cdec, see ../BUILDING .
-To build only parts needed for dtrain do
-```
- autoreconf -ifv
- ./configure
- cd training/dtrain/; make
-```
+Builds when building cdec, see ../../BUILDING .
Running
-------
-See directories under examples/ .
+Download runnable examples for all use cases from [1] and extract here.
+
+TODO
+----
+ * "stop_after" stop after X inputs
+ * "select_weights" average, best, last
+ * "rescale" rescale weight vector
+ * implement SVM objective?
+ * other variants of l1 regularization?
+ * l2 regularization?
+ * l1/l2 regularization?
+ * scale updates by bleu difference
+ * AdaGrad, per-coordinate learning rates
+ * batch update
+ * "repeat" iterate over k-best lists
+ * show k-best loss improvement
+ * "quiet"
+ * "verbose"
+ * fix output
Legal
-----
-Copyright (c) 2012-2013 by Patrick Simianer <p@simianer.de>
+Copyright (c) 2012-2015 by Patrick Simianer <p@simianer.de>
See the file LICENSE.txt in the root folder for the licensing terms that this software is
released under.
+
+[1] http://simianer.de/dtrain-example.tar.gz
+
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index ccb50af2..b488e661 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -1,698 +1,434 @@
#include "dtrain.h"
+#include "sample.h"
#include "score.h"
-#include "kbestget.h"
-#include "ksampler.h"
-#include "pairsampling.h"
+#include "update.h"
using namespace dtrain;
-
-bool
-dtrain_init(int argc, char** argv, po::variables_map* cfg)
-{
- po::options_description ini("Configuration File Options");
- ini.add_options()
- ("input", po::value<string>(), "input file (src)")
- ("refs,r", po::value<string>(), "references")
- ("bitext,b", po::value<string>(), "bitext: 'src ||| tgt'")
- ("output", po::value<string>()->default_value("-"), "output weights file, '-' for STDOUT")
- ("input_weights", po::value<string>(), "input weights file (e.g. from previous iteration)")
- ("decoder_config", po::value<string>(), "configuration file for cdec")
- ("print_weights", po::value<string>(), "weights to print on each iteration")
- ("stop_after", po::value<unsigned>()->default_value(0), "stop after X input sentences")
- ("keep", po::value<bool>()->zero_tokens(), "keep weights files for each iteration")
- ("epochs", po::value<unsigned>()->default_value(10), "# of iterations T (per shard)")
- ("k", po::value<unsigned>()->default_value(100), "how many translations to sample")
- ("sample_from", po::value<string>()->default_value("kbest"), "where to sample translations from: 'kbest', 'forest'")
- ("filter", po::value<string>()->default_value("uniq"), "filter kbest list: 'not', 'uniq'")
- ("pair_sampling", po::value<string>()->default_value("XYX"), "how to sample pairs: 'all', 'XYX' or 'PRO'")
- ("hi_lo", po::value<float>()->default_value(0.1), "hi and lo (X) for XYX (default 0.1), <= 0.5")
- ("pair_threshold", po::value<score_t>()->default_value(0.), "bleu [0,1] threshold to filter pairs")
- ("N", po::value<unsigned>()->default_value(4), "N for Ngrams (BLEU)")
- ("scorer", po::value<string>()->default_value("stupid_bleu"), "scoring: bleu, stupid_, smooth_, approx_, lc_")
- ("learning_rate", po::value<weight_t>()->default_value(1.0), "learning rate")
- ("gamma", po::value<weight_t>()->default_value(0.), "gamma for SVM (0 for perceptron)")
- ("select_weights", po::value<string>()->default_value("last"), "output best, last, avg weights ('VOID' to throw away)")
- ("rescale", po::value<bool>()->zero_tokens(), "rescale weight vector after each input")
- ("l1_reg", po::value<string>()->default_value("none"), "apply l1 regularization as in 'Tsuroka et al' (2010) UNTESTED")
- ("l1_reg_strength", po::value<weight_t>(), "l1 regularization strength")
- ("fselect", po::value<weight_t>()->default_value(-1), "select top x percent (or by threshold) of features after each epoch NOT IMPLEMENTED") // TODO
- ("approx_bleu_d", po::value<score_t>()->default_value(0.9), "discount for approx. BLEU")
- ("scale_bleu_diff", po::value<bool>()->zero_tokens(), "learning rate <- bleu diff of a misranked pair")
- ("loss_margin", po::value<weight_t>()->default_value(0.), "update if no error in pref pair but model scores this near")
- ("max_pairs", po::value<unsigned>()->default_value(std::numeric_limits<unsigned>::max()), "max. # of pairs per Sent.")
- ("pclr", po::value<string>()->default_value("no"), "use a (simple|adagrad) per-coordinate learning rate")
- ("batch", po::value<bool>()->zero_tokens(), "do batch optimization")
- ("repeat", po::value<unsigned>()->default_value(1), "repeat optimization over kbest list this number of times")
- ("check", po::value<bool>()->zero_tokens(), "produce list of loss differentials")
- ("noup", po::value<bool>()->zero_tokens(), "do not update weights");
- po::options_description cl("Command Line Options");
- cl.add_options()
- ("config,c", po::value<string>(), "dtrain config file")
- ("quiet,q", po::value<bool>()->zero_tokens(), "be quiet")
- ("verbose,v", po::value<bool>()->zero_tokens(), "be verbose");
- cl.add(ini);
- po::store(parse_command_line(argc, argv, cl), *cfg);
- if (cfg->count("config")) {
- ifstream ini_f((*cfg)["config"].as<string>().c_str());
- po::store(po::parse_config_file(ini_f, ini), *cfg);
- }
- po::notify(*cfg);
- if (!cfg->count("decoder_config")) {
- cerr << cl << endl;
- return false;
- }
- if ((*cfg)["sample_from"].as<string>() != "kbest"
- && (*cfg)["sample_from"].as<string>() != "forest") {
- cerr << "Wrong 'sample_from' param: '" << (*cfg)["sample_from"].as<string>() << "', use 'kbest' or 'forest'." << endl;
- return false;
- }
- if ((*cfg)["sample_from"].as<string>() == "kbest" && (*cfg)["filter"].as<string>() != "uniq" &&
- (*cfg)["filter"].as<string>() != "not") {
- cerr << "Wrong 'filter' param: '" << (*cfg)["filter"].as<string>() << "', use 'uniq' or 'not'." << endl;
- return false;
- }
- if ((*cfg)["pair_sampling"].as<string>() != "all" && (*cfg)["pair_sampling"].as<string>() != "XYX" &&
- (*cfg)["pair_sampling"].as<string>() != "PRO") {
- cerr << "Wrong 'pair_sampling' param: '" << (*cfg)["pair_sampling"].as<string>() << "'." << endl;
- return false;
- }
- if (cfg->count("hi_lo") && (*cfg)["pair_sampling"].as<string>() != "XYX") {
- cerr << "Warning: hi_lo only works with pair_sampling XYX." << endl;
- }
- if ((*cfg)["hi_lo"].as<float>() > 0.5 || (*cfg)["hi_lo"].as<float>() < 0.01) {
- cerr << "hi_lo must lie in [0.01, 0.5]" << endl;
- return false;
- }
- if ((cfg->count("input")>0 || cfg->count("refs")>0) && cfg->count("bitext")>0) {
- cerr << "Provide 'input' and 'refs' or 'bitext', not both." << endl;
- return false;
- }
- if ((*cfg)["pair_threshold"].as<score_t>() < 0) {
- cerr << "The threshold must be >= 0!" << endl;
- return false;
- }
- if ((*cfg)["select_weights"].as<string>() != "last" && (*cfg)["select_weights"].as<string>() != "best" &&
- (*cfg)["select_weights"].as<string>() != "avg" && (*cfg)["select_weights"].as<string>() != "VOID") {
- cerr << "Wrong 'select_weights' param: '" << (*cfg)["select_weights"].as<string>() << "', use 'last' or 'best'." << endl;
- return false;
- }
- return true;
-}
-
int
main(int argc, char** argv)
{
- // handle most parameters
- po::variables_map cfg;
- if (!dtrain_init(argc, argv, &cfg)) exit(1); // something is wrong
- bool quiet = false;
- if (cfg.count("quiet")) quiet = true;
- bool verbose = false;
- if (cfg.count("verbose")) verbose = true;
- bool noup = false;
- if (cfg.count("noup")) noup = true;
- bool rescale = false;
- if (cfg.count("rescale")) rescale = true;
- bool keep = false;
- if (cfg.count("keep")) keep = true;
-
- const unsigned k = cfg["k"].as<unsigned>();
- const unsigned N = cfg["N"].as<unsigned>();
- const unsigned T = cfg["epochs"].as<unsigned>();
- const unsigned stop_after = cfg["stop_after"].as<unsigned>();
- const string filter_type = cfg["filter"].as<string>();
- const string sample_from = cfg["sample_from"].as<string>();
- const string pair_sampling = cfg["pair_sampling"].as<string>();
- const score_t pair_threshold = cfg["pair_threshold"].as<score_t>();
- const string select_weights = cfg["select_weights"].as<string>();
- const float hi_lo = cfg["hi_lo"].as<float>();
- const score_t approx_bleu_d = cfg["approx_bleu_d"].as<score_t>();
- const unsigned max_pairs = cfg["max_pairs"].as<unsigned>();
- int repeat = cfg["repeat"].as<unsigned>();
- bool check = false;
- if (cfg.count("check")) check = true;
- weight_t loss_margin = cfg["loss_margin"].as<weight_t>();
- bool batch = false;
- if (cfg.count("batch")) batch = true;
- if (loss_margin > 9998.) loss_margin = std::numeric_limits<float>::max();
- bool scale_bleu_diff = false;
- if (cfg.count("scale_bleu_diff")) scale_bleu_diff = true;
- const string pclr = cfg["pclr"].as<string>();
- bool average = false;
- if (select_weights == "avg")
- average = true;
+ // get configuration
+ po::variables_map conf;
+ if (!dtrain_init(argc, argv, &conf))
+ return 1;
+ const size_t k = conf["k"].as<size_t>();
+ const bool unique_kbest = conf["unique_kbest"].as<bool>();
+ const bool forest_sample = conf["forest_sample"].as<bool>();
+ const string score_name = conf["score"].as<string>();
+ const weight_t nakov_fix = conf["nakov_fix"].as<weight_t>();
+ const weight_t chiang_decay = conf["chiang_decay"].as<weight_t>();
+ const size_t N = conf["N"].as<size_t>();
+ const size_t T = conf["iterations"].as<size_t>();
+ const weight_t eta = conf["learning_rate"].as<weight_t>();
+ const weight_t margin = conf["margin"].as<weight_t>();
+ const weight_t cut = conf["cut"].as<weight_t>();
+ const bool adjust_cut = conf["adjust"].as<bool>();
+ const bool all_pairs = cut==0;
+ const bool average = conf["average"].as<bool>();
+ const bool pro = conf["pro_sampling"].as<bool>();
+ const bool structured = conf["structured"].as<bool>();
+ const weight_t threshold = conf["threshold"].as<weight_t>();
+ const size_t max_up = conf["max_pairs"].as<size_t>();
+ const weight_t l1_reg = conf["l1_reg"].as<weight_t>();
+ const bool keep = conf["keep"].as<bool>();
+ const bool noup = conf["disable_learning"].as<bool>();
+ const string output_fn = conf["output"].as<string>();
vector<string> print_weights;
- if (cfg.count("print_weights"))
- boost::split(print_weights, cfg["print_weights"].as<string>(), boost::is_any_of(" "));
+ boost::split(print_weights, conf["print_weights"].as<string>(),
+ boost::is_any_of(" "));
+ const string output_updates_fn = conf["output_updates"].as<string>();
+ const bool output_updates = output_updates_fn!="";
+ const string output_raw_fn = conf["output_raw"].as<string>();
+ const bool output_raw = output_raw_fn!="";
+ const bool use_adadelta = conf["adadelta"].as<bool>();
+ const weight_t adadelta_decay = conf["adadelta_decay"].as<weight_t>();
+ const weight_t adadelta_eta = 0.000001;
+ const string adadelta_input = conf["adadelta_input"].as<string>();
+ const string adadelta_output = conf["adadelta_output"].as<string>();
+ const size_t max_input = conf["stop_after"].as<size_t>();
+ const bool batch = conf["batch"].as<bool>();
// setup decoder
register_feature_functions();
SetSilent(true);
- ReadFile ini_rf(cfg["decoder_config"].as<string>());
- if (!quiet)
- cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl;
- Decoder decoder(ini_rf.stream());
-
- // scoring metric/scorer
- string scorer_str = cfg["scorer"].as<string>();
- LocalScorer* scorer;
- if (scorer_str == "bleu") {
- scorer = static_cast<BleuScorer*>(new BleuScorer);
- } else if (scorer_str == "stupid_bleu") {
- scorer = static_cast<StupidBleuScorer*>(new StupidBleuScorer);
- } else if (scorer_str == "fixed_stupid_bleu") {
- scorer = static_cast<FixedStupidBleuScorer*>(new FixedStupidBleuScorer);
- } else if (scorer_str == "smooth_bleu") {
- scorer = static_cast<SmoothBleuScorer*>(new SmoothBleuScorer);
- } else if (scorer_str == "sum_bleu") {
- scorer = static_cast<SumBleuScorer*>(new SumBleuScorer);
- } else if (scorer_str == "sumexp_bleu") {
- scorer = static_cast<SumExpBleuScorer*>(new SumExpBleuScorer);
- } else if (scorer_str == "sumwhatever_bleu") {
- scorer = static_cast<SumWhateverBleuScorer*>(new SumWhateverBleuScorer);
- } else if (scorer_str == "approx_bleu") {
- scorer = static_cast<ApproxBleuScorer*>(new ApproxBleuScorer(N, approx_bleu_d));
- } else if (scorer_str == "lc_bleu") {
- scorer = static_cast<LinearBleuScorer*>(new LinearBleuScorer(N));
+ ReadFile f(conf["decoder_conf"].as<string>());
+ Decoder decoder(f.stream());
+
+ // setup scorer & observer
+ Scorer* scorer;
+ if (score_name == "nakov") {
+ scorer = static_cast<NakovBleuScorer*>(new NakovBleuScorer(N, nakov_fix));
+ } else if (score_name == "papineni") {
+ scorer = static_cast<PapineniBleuScorer*>(new PapineniBleuScorer(N));
+ } else if (score_name == "lin") {
+ scorer = static_cast<LinBleuScorer*>(new LinBleuScorer(N));
+ } else if (score_name == "liang") {
+ scorer = static_cast<LiangBleuScorer*>(new LiangBleuScorer(N));
+ } else if (score_name == "chiang") {
+ scorer = static_cast<ChiangBleuScorer*>(new ChiangBleuScorer(N));
+ } else if (score_name == "sum") {
+ scorer = static_cast<SumBleuScorer*>(new SumBleuScorer(N));
} else {
- cerr << "Don't know scoring metric: '" << scorer_str << "', exiting." << endl;
- exit(1);
+ assert(false);
}
- vector<score_t> bleu_weights;
- scorer->Init(N, bleu_weights);
-
- // setup decoder observer
- MT19937 rng; // random number generator, only for forest sampling
HypSampler* observer;
- if (sample_from == "kbest")
- observer = static_cast<KBestGetter*>(new KBestGetter(k, filter_type));
+ if (forest_sample)
+ observer = new KSampler(k, scorer);
+ else if (unique_kbest)
+ observer = new KBestSampler(k, scorer);
else
- observer = static_cast<KSampler*>(new KSampler(k, &rng));
- observer->SetScorer(scorer);
+ observer = new KBestNoFilterSampler(k, scorer);
- // init weights
+ // weights
vector<weight_t>& decoder_weights = decoder.CurrentWeightVector();
- SparseVector<weight_t> lambdas, cumulative_penalties, w_average;
- if (cfg.count("input_weights")) Weights::InitFromFile(cfg["input_weights"].as<string>(), &decoder_weights);
- Weights::InitSparseVector(decoder_weights, &lambdas);
-
- // meta params for perceptron, SVM
- weight_t eta = cfg["learning_rate"].as<weight_t>();
- weight_t gamma = cfg["gamma"].as<weight_t>();
-
- // faster perceptron: consider only misranked pairs, see
- bool faster_perceptron = false;
- if (gamma==0 && loss_margin==0) faster_perceptron = true;
-
- // l1 regularization
- bool l1naive = false;
- bool l1clip = false;
- bool l1cumul = false;
- weight_t l1_reg = 0;
- if (cfg["l1_reg"].as<string>() != "none") {
- string s = cfg["l1_reg"].as<string>();
- if (s == "naive") l1naive = true;
- else if (s == "clip") l1clip = true;
- else if (s == "cumul") l1cumul = true;
- l1_reg = cfg["l1_reg_strength"].as<weight_t>();
+ SparseVector<weight_t> lambdas, w_average;
+ if (conf.count("input_weights")) {
+ Weights::InitFromFile(conf["input_weights"].as<string>(), &decoder_weights);
+ Weights::InitSparseVector(decoder_weights, &lambdas);
}
- // output
- string output_fn = cfg["output"].as<string>();
// input
- bool read_bitext = false;
- string input_fn;
- if (cfg.count("bitext")) {
- read_bitext = true;
- input_fn = cfg["bitext"].as<string>();
+ string input_fn = conf["bitext"].as<string>();
+ ReadFile input(input_fn);
+ vector<string> buf; // decoder only accepts strings as input
+ vector<vector<Ngrams> > buffered_ngrams; // compute ngrams and lengths of references
+ vector<vector<size_t> > buffered_lengths; // (just once)
+ size_t input_sz = 0;
+
+ // output configuration
+ cerr << fixed << setprecision(4);
+ cerr << "Parameters:" << endl;
+ cerr << setw(25) << "bitext " << "'" << input_fn << "'" << endl;
+ cerr << setw(25) << "k " << k << endl;
+ if (unique_kbest && !forest_sample)
+ cerr << setw(25) << "unique k-best " << unique_kbest << endl;
+ if (forest_sample)
+ cerr << setw(25) << "forest " << forest_sample << endl;
+ if (all_pairs)
+ cerr << setw(25) << "all pairs " << all_pairs << endl;
+ else if (pro)
+ cerr << setw(25) << "PRO " << pro << endl;
+ cerr << setw(25) << "score " << "'" << score_name << "'" << endl;
+ if (score_name == "nakov")
+ cerr << setw(25) << "nakov fix " << nakov_fix << endl;
+ if (score_name == "chiang")
+ cerr << setw(25) << "chiang decay " << chiang_decay << endl;
+ cerr << setw(25) << "N " << N << endl;
+ cerr << setw(25) << "T " << T << endl;
+ cerr << scientific << setw(25) << "learning rate " << eta << endl;
+ cerr << setw(25) << "margin " << margin << endl;
+ if (!structured) {
+ cerr << fixed << setw(25) << "cut " << round(cut*100) << "%" << endl;
+ cerr << setw(25) << "adjust " << adjust_cut << endl;
} else {
- input_fn = cfg["input"].as<string>();
+ cerr << setw(25) << "struct. obj " << structured << endl;
}
- ReadFile input(input_fn);
- // buffer input for t > 0
- vector<string> src_str_buf; // source strings (decoder takes only strings)
- vector<vector<WordID> > ref_ids_buf; // references as WordID vecs
- ReadFile refs;
- string refs_fn;
- if (!read_bitext) {
- refs_fn = cfg["refs"].as<string>();
- refs.Init(refs_fn);
+ if (threshold > 0)
+ cerr << setw(25) << "threshold " << threshold << endl;
+ if (max_up != numeric_limits<size_t>::max())
+ cerr << setw(25) << "max up. " << max_up << endl;
+ if (noup)
+ cerr << setw(25) << "no up. " << noup << endl;
+ cerr << setw(25) << "average " << average << endl;
+ cerr << scientific << setw(25) << "l1 reg. " << l1_reg << endl;
+ cerr << setw(25) << "decoder conf " << "'"
+ << conf["decoder_conf"].as<string>() << "'" << endl;
+ cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
+ cerr << setw(25) << "output " << "'" << output_fn << "'" << endl;
+ if (conf.count("input_weights")) {
+ cerr << setw(25) << "weights in " << "'"
+ << conf["input_weights"].as<string>() << "'" << endl;
}
+ cerr << setw(25) << "batch " << batch << endl;
+ if (noup)
+ cerr << setw(25) << "no updates!" << endl;
+ if (use_adadelta) {
+ cerr << setw(25) << "adadelta " << use_adadelta << endl;
+ cerr << setw(25) << " decay " << adadelta_decay << endl;
+ if (adadelta_input != "")
+ cerr << setw(25) << "-input " << adadelta_input << endl;
+ if (adadelta_output != "")
+ cerr << setw(25) << "-output " << adadelta_output << endl;
+ }
+ cerr << "(1 dot per processed input)" << endl;
+
+ // meta
+ weight_t best=0., gold_prev=0.;
+ size_t best_iteration = 0;
+ time_t total_time = 0.;
- unsigned in_sz = std::numeric_limits<unsigned>::max(); // input index, input size
- vector<pair<score_t, score_t> > all_scores;
- score_t max_score = 0.;
- unsigned best_it = 0;
- float overall_time = 0.;
-
- // output cfg
- if (!quiet) {
- cerr << _p5;
- cerr << endl << "dtrain" << endl << "Parameters:" << endl;
- cerr << setw(25) << "k " << k << endl;
- cerr << setw(25) << "N " << N << endl;
- cerr << setw(25) << "T " << T << endl;
- cerr << setw(25) << "batch " << batch << endl;
- cerr << setw(26) << "scorer '" << scorer_str << "'" << endl;
- if (scorer_str == "approx_bleu")
- cerr << setw(25) << "approx. B discount " << approx_bleu_d << endl;
- cerr << setw(25) << "sample from " << "'" << sample_from << "'" << endl;
- if (sample_from == "kbest")
- cerr << setw(25) << "filter " << "'" << filter_type << "'" << endl;
- if (!scale_bleu_diff) cerr << setw(25) << "learning rate " << eta << endl;
- else cerr << setw(25) << "learning rate " << "bleu diff" << endl;
- cerr << setw(25) << "gamma " << gamma << endl;
- cerr << setw(25) << "loss margin " << loss_margin << endl;
- cerr << setw(25) << "faster perceptron " << faster_perceptron << endl;
- cerr << setw(25) << "pairs " << "'" << pair_sampling << "'" << endl;
- if (pair_sampling == "XYX")
- cerr << setw(25) << "hi lo " << hi_lo << endl;
- cerr << setw(25) << "pair threshold " << pair_threshold << endl;
- cerr << setw(25) << "select weights " << "'" << select_weights << "'" << endl;
- if (cfg.count("l1_reg"))
- cerr << setw(25) << "l1 reg " << l1_reg << " '" << cfg["l1_reg"].as<string>() << "'" << endl;
- if (rescale)
- cerr << setw(25) << "rescale " << rescale << endl;
- cerr << setw(25) << "pclr " << pclr << endl;
- cerr << setw(25) << "max pairs " << max_pairs << endl;
- cerr << setw(25) << "repeat " << repeat << endl;
- //cerr << setw(25) << "test k-best " << test_k_best << endl;
- cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl;
- cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
- if (!read_bitext)
- cerr << setw(25) << "refs " << "'" << refs_fn << "'" << endl;
- cerr << setw(25) << "output " << "'" << output_fn << "'" << endl;
- if (cfg.count("input_weights"))
- cerr << setw(25) << "weights in " << "'" << cfg["input_weights"].as<string>() << "'" << endl;
- if (stop_after > 0)
- cerr << setw(25) << "stop_after " << stop_after << endl;
- if (!verbose) cerr << "(a dot represents " << DTRAIN_DOTS << " inputs)" << endl;
+ // output
+ WriteFile out_up, out_raw;
+ if (output_raw) {
+ out_raw.Init(output_raw_fn);
+ *out_raw << setprecision(numeric_limits<double>::digits10+1);
+ }
+ if (output_updates) {
+ out_up.Init(output_updates_fn);
+ *out_up << setprecision(numeric_limits<double>::digits10+1);
}
- // pclr
- SparseVector<weight_t> learning_rates;
- // batch
- SparseVector<weight_t> batch_updates;
- score_t batch_loss;
+ // adadelta
+ SparseVector<weight_t> gradient_accum, update_accum;
+ if (use_adadelta && adadelta_input!="") {
+ vector<weight_t> grads_tmp;
+ Weights::InitFromFile(adadelta_input+".gradient.gz", &grads_tmp);
+ Weights::InitSparseVector(grads_tmp, &gradient_accum);
+ vector<weight_t> update_tmp;
+ Weights::InitFromFile(adadelta_input+".update.gz", &update_tmp);
+ Weights::InitSparseVector(update_tmp, &update_accum);
+ }
- for (unsigned t = 0; t < T; t++) // T epochs
+ for (size_t t = 0; t < T; t++) // T iterations
{
+ // batch update
+ SparseVector<weight_t> batch_update;
+
time_t start, end;
time(&start);
- score_t score_sum = 0.;
- score_t model_sum(0);
- unsigned ii = 0, rank_errors = 0, margin_violations = 0, npairs = 0, f_count = 0, list_sz = 0, kbest_loss_improve = 0;
- batch_loss = 0.;
- if (!quiet) cerr << "Iteration #" << t+1 << " of " << T << "." << endl;
+ weight_t gold_sum=0., model_sum=0.;
+ size_t i=0, num_up=0, feature_count=0, list_sz=0;
+
+ cerr << "Iteration #" << t+1 << " of " << T << "." << endl;
while(true)
{
+ bool next = true;
- string in;
- string ref;
- bool next = false, stop = false; // next iteration or premature stop
+ // getting input
if (t == 0) {
- if(!getline(*input, in)) next = true;
- if(read_bitext) {
- vector<string> strs;
- boost::algorithm::split_regex(strs, in, boost::regex(" \\|\\|\\| "));
- in = strs[0];
- ref = strs[1];
- }
- } else {
- if (ii == in_sz) next = true; // stop if we reach the end of our input
- }
- // stop after X sentences (but still go on for those)
- if (stop_after > 0 && stop_after == ii && !next) stop = true;
-
- // produce some pretty output
- if (!quiet && !verbose) {
- if (ii == 0) cerr << " ";
- if ((ii+1) % (DTRAIN_DOTS) == 0) {
- cerr << ".";
- cerr.flush();
- }
- if ((ii+1) % (20*DTRAIN_DOTS) == 0) {
- cerr << " " << ii+1 << endl;
- if (!next && !stop) cerr << " ";
- }
- if (stop) {
- if (ii % (20*DTRAIN_DOTS) != 0) cerr << " " << ii << endl;
- cerr << "Stopping after " << stop_after << " input sentences." << endl;
+ string in;
+ if(!getline(*input, in)) {
+ next = false;
} else {
- if (next) {
- if (ii % (20*DTRAIN_DOTS) != 0) cerr << " " << ii << endl;
+ vector<string> parts;
+ boost::algorithm::split_regex(parts, in, boost::regex(" \\|\\|\\| "));
+ buf.push_back(parts[0]);
+ parts.erase(parts.begin());
+ buffered_ngrams.push_back({});
+ buffered_lengths.push_back({});
+ for (auto s: parts) {
+ vector<WordID> r;
+ vector<string> toks;
+ boost::split(toks, s, boost::is_any_of(" "));
+ for (auto tok: toks)
+ r.push_back(TD::Convert(tok));
+ buffered_ngrams.back().emplace_back(ngrams(r, N));
+ buffered_lengths.back().push_back(r.size());
}
}
+ } else {
+ next = i<input_sz;
}
- // next iteration
- if (next || stop) break;
-
- // weights
- lambdas.init_vector(&decoder_weights);
+ if (max_input == i)
+ next = false;
- // getting input
- vector<WordID> ref_ids; // reference as vector<WordID>
- if (t == 0) {
- if (!read_bitext) {
- getline(*refs, ref);
- }
- vector<string> ref_tok;
- boost::split(ref_tok, ref, boost::is_any_of(" "));
- register_and_convert(ref_tok, ref_ids);
- ref_ids_buf.push_back(ref_ids);
- src_str_buf.push_back(in);
+ // produce some pretty output
+ if (next) {
+ if (i%20 == 0)
+ cerr << " ";
+ cerr << ".";
+ if ((i+1)%20==0)
+ cerr << " " << i+1 << endl;
} else {
- ref_ids = ref_ids_buf[ii];
- }
- observer->SetRef(ref_ids);
- if (t == 0)
- decoder.Decode(in, observer);
- else
- decoder.Decode(src_str_buf[ii], observer);
-
- // get (scored) samples
- vector<ScoredHyp>* samples = observer->GetSamples();
-
- if (verbose) {
- cerr << "--- ref for " << ii << ": ";
- if (t > 0) printWordIDVec(ref_ids_buf[ii]);
- else printWordIDVec(ref_ids);
- cerr << endl;
- for (unsigned u = 0; u < samples->size(); u++) {
- cerr << _p2 << _np << "[" << u << ". '";
- printWordIDVec((*samples)[u].w);
- cerr << "'" << endl;
- cerr << "SCORE=" << (*samples)[u].score << ",model="<< (*samples)[u].model << endl;
- cerr << "F{" << (*samples)[u].f << "} ]" << endl << endl;
- }
- }
-
- if (repeat == 1) {
- score_sum += (*samples)[0].score; // stats for 1best
- model_sum += (*samples)[0].model;
+ if (i%20 != 0)
+ cerr << " " << i << endl;
}
+ cerr.flush();
- f_count += observer->get_f_count();
- list_sz += observer->get_sz();
-
- // weight updates
- if (!noup) {
- // get pairs
- vector<pair<ScoredHyp,ScoredHyp> > pairs;
- if (pair_sampling == "all")
- all_pairs(samples, pairs, pair_threshold, max_pairs, faster_perceptron);
- if (pair_sampling == "XYX")
- partXYX(samples, pairs, pair_threshold, max_pairs, faster_perceptron, hi_lo);
- if (pair_sampling == "PRO")
- PROsampling(samples, pairs, pair_threshold, max_pairs);
- int cur_npairs = pairs.size();
- npairs += cur_npairs;
-
- score_t kbest_loss_first = 0.0, kbest_loss_last = 0.0;
-
- if (check) repeat = 2;
- vector<float> losses; // for check
-
- for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
- it != pairs.end(); it++) {
- score_t model_diff = it->first.model - it->second.model;
- score_t loss = max(0.0, -1.0 * model_diff);
- losses.push_back(loss);
- kbest_loss_first += loss;
- }
-
- score_t kbest_loss = 0.0;
- for (int ki=0; ki < repeat; ki++) {
+ // stop iterating
+ if (!next) break;
- SparseVector<weight_t> lambdas_copy; // for l1 regularization
- SparseVector<weight_t> sum_up; // for pclr
- if (l1naive||l1clip||l1cumul) lambdas_copy = lambdas;
+ // decode
+ if (t > 0 || i > 0)
+ lambdas.init_vector(&decoder_weights);
+ observer->reference_ngrams = &buffered_ngrams[i];
+ observer->reference_lengths = &buffered_lengths[i];
+ decoder.Decode(buf[i], observer);
+ vector<Hyp>* sample = &(observer->sample);
- unsigned pair_idx = 0; // for check
- for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
- it != pairs.end(); it++) {
- score_t model_diff = it->first.model - it->second.model;
- score_t loss = max(0.0, -1.0 * model_diff);
+ // stats for 1-best
+ gold_sum += sample->front().gold;
+ model_sum += sample->front().model;
+ feature_count += observer->feature_count;
+ list_sz += observer->effective_size;
- if (check && ki==repeat-1) cout << losses[pair_idx] - loss << endl;
- pair_idx++;
+ if (output_raw)
+ output_sample(sample, out_raw, i);
- if (repeat > 1) {
- model_diff = lambdas.dot(it->first.f) - lambdas.dot(it->second.f);
- kbest_loss += loss;
- }
- bool rank_error = false;
- score_t margin;
- if (faster_perceptron) { // we only have considering misranked pairs
- rank_error = true; // pair sampling already did this for us
- margin = std::numeric_limits<float>::max();
- } else {
- rank_error = model_diff<=0.0;
- margin = fabs(model_diff);
- if (!rank_error && margin < loss_margin) margin_violations++;
- }
- if (rank_error && ki==0) rank_errors++;
- if (scale_bleu_diff) eta = it->first.score - it->second.score;
- if (rank_error || margin < loss_margin) {
- SparseVector<weight_t> diff_vec = it->first.f - it->second.f;
- if (batch) {
- batch_loss += max(0., -1.0 * model_diff);
- batch_updates += diff_vec;
- continue;
- }
- if (pclr != "no") {
- sum_up += diff_vec;
- } else {
- lambdas.plus_eq_v_times_s(diff_vec, eta);
- if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./cur_npairs));
- }
- }
- }
+ // update model
+ if (!noup) {
- // per-coordinate learning rate
- if (pclr != "no") {
- SparseVector<weight_t>::iterator it = sum_up.begin();
- for (; it != sum_up.end(); ++it) {
- if (pclr == "simple") {
- lambdas[it->first] += it->second / max(1.0, learning_rates[it->first]);
- learning_rates[it->first]++;
- } else if (pclr == "adagrad") {
- if (learning_rates[it->first] == 0) {
- lambdas[it->first] += it->second * eta;
- } else {
- lambdas[it->first] += it->second * eta * learning_rates[it->first];
- }
- learning_rates[it->first] += pow(it->second, 2.0);
- }
- }
- }
+ SparseVector<weight_t> updates;
+ if (structured)
+ num_up += update_structured(sample, updates, margin,
+ out_up, i);
+ else if (all_pairs)
+ num_up += updates_all(sample, updates, max_up, threshold,
+ out_up, i);
+ else if (pro)
+ num_up += updates_pro(sample, updates, cut, max_up, threshold,
+ out_up, i);
+ else
+ num_up += updates_multipartite(sample, updates, cut, margin,
+ max_up, threshold, adjust_cut,
+ out_up, i);
+
+ SparseVector<weight_t> lambdas_copy;
+ if (l1_reg)
+ lambdas_copy = lambdas;
+
+ if (use_adadelta) { // adadelta update
+ SparseVector<weight_t> squared;
+ for (auto it: updates)
+ squared[it.first] = pow(it.second, 2.0);
+ gradient_accum *= adadelta_decay;
+ squared *= 1.0-adadelta_decay;
+ gradient_accum += squared;
+ SparseVector<weight_t> u = gradient_accum + update_accum;
+ for (auto it: u)
+ u[it.first] = -1.0*(
+ sqrt(update_accum[it.first]+adadelta_eta)
+ /
+ sqrt(gradient_accum[it.first]+adadelta_eta)
+ ) * updates[it.first];
+ lambdas += u;
+ update_accum *= adadelta_decay;
+ for (auto it: u)
+ u[it.first] = pow(it.second, 2.0);
+ update_accum = update_accum + (u*(1.0-adadelta_decay));
+ } else if (batch) {
+ batch_update += updates;
+ } else { // regular update
+ lambdas.plus_eq_v_times_s(updates, eta);
+ }
- // l1 regularization
- // please note that this regularizations happen
- // after a _sentence_ -- not after each example/pair!
- if (l1naive) {
- SparseVector<weight_t>::iterator it = lambdas.begin();
- for (; it != lambdas.end(); ++it) {
- if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
- it->second *= max(0.0000001, eta/(eta+learning_rates[it->first])); // FIXME
- learning_rates[it->first]++;
- it->second -= sign(it->second) * l1_reg;
- }
- }
- } else if (l1clip) {
- SparseVector<weight_t>::iterator it = lambdas.begin();
- for (; it != lambdas.end(); ++it) {
- if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
- if (it->second != 0) {
- weight_t v = it->second;
- if (v > 0) {
- it->second = max(0., v - l1_reg);
- } else {
- it->second = min(0., v + l1_reg);
- }
- }
- }
- }
- } else if (l1cumul) {
- weight_t acc_penalty = (ii+1) * l1_reg; // ii is the index of the current input
- SparseVector<weight_t>::iterator it = lambdas.begin();
- for (; it != lambdas.end(); ++it) {
- if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
- if (it->second != 0) {
- weight_t v = it->second;
- weight_t penalized = 0.;
- if (v > 0) {
- penalized = max(0., v-(acc_penalty + cumulative_penalties.get(it->first)));
- } else {
- penalized = min(0., v+(acc_penalty - cumulative_penalties.get(it->first)));
- }
- it->second = penalized;
- cumulative_penalties.set_value(it->first, cumulative_penalties.get(it->first)+penalized);
- }
- }
+ // update context for Chiang's approx. BLEU
+ if (score_name == "chiang") {
+ for (auto it: *sample) {
+ if (it.rank == 0) {
+ scorer->update_context(it.w, buffered_ngrams[i],
+ buffered_lengths[i], chiang_decay);
+ break;
}
}
+ }
- if (ki==repeat-1) { // done
- kbest_loss_last = kbest_loss;
- if (repeat > 1) {
- score_t best_model = -std::numeric_limits<score_t>::max();
- unsigned best_idx = 0;
- for (unsigned i=0; i < samples->size(); i++) {
- score_t s = lambdas.dot((*samples)[i].f);
- if (s > best_model) {
- best_idx = i;
- best_model = s;
- }
+ // \ell_1 regularization
+ // NB: regularization is done after each sentence,
+ // not after every single pair!
+ if (l1_reg) {
+ SparseVector<weight_t>::iterator it = lambdas.begin();
+ for (; it != lambdas.end(); ++it) {
+ weight_t v = it->second;
+ if (!v)
+ continue;
+ if (!lambdas_copy.get(it->first) // new or..
+ || lambdas_copy.get(it->first)!=v) // updated feature
+ {
+ if (v > 0) {
+ it->second = max(0., v - l1_reg);
+ } else {
+ it->second = min(0., v + l1_reg);
}
- score_sum += (*samples)[best_idx].score;
- model_sum += best_model;
}
}
- } // repeat
-
- if ((kbest_loss_first - kbest_loss_last) >= 0) kbest_loss_improve++;
+ }
} // noup
- if (rescale) lambdas /= lambdas.l2norm();
-
- ++ii;
+ i++;
} // input loop
- if (t == 0) in_sz = ii; // remember size of input (# lines)
-
+ if (t == 0)
+ input_sz = i; // remember size of input (# lines)
+ // batch
if (batch) {
- lambdas.plus_eq_v_times_s(batch_updates, eta);
- if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs));
- batch_updates.clear();
+ batch_update /= (weight_t)num_up;
+ lambdas.plus_eq_v_times_s(batch_update, eta);
+ lambdas.init_vector(&decoder_weights);
}
- if (average) w_average += lambdas;
-
- if (scorer_str == "approx_bleu" || scorer_str == "lc_bleu") scorer->Reset();
-
- // print some stats
- score_t score_avg = score_sum/(score_t)in_sz;
- score_t model_avg = model_sum/(score_t)in_sz;
- score_t score_diff, model_diff;
- if (t > 0) {
- score_diff = score_avg - all_scores[t-1].first;
- model_diff = model_avg - all_scores[t-1].second;
- } else {
- score_diff = score_avg;
- model_diff = model_avg;
+ // update average
+ if (average)
+ w_average += lambdas;
+
+ if (adadelta_output != "") {
+ WriteFile g(adadelta_output+".gradient.gz");
+ for (auto it: gradient_accum)
+ *g << FD::Convert(it.first) << " " << it.second << endl;
+ WriteFile u(adadelta_output+".update.gz");
+ for (auto it: update_accum)
+ *u << FD::Convert(it.first) << " " << it.second << endl;
}
- unsigned nonz = 0;
- if (!quiet) nonz = (unsigned)lambdas.num_nonzero();
-
- if (!quiet) {
- cerr << _p5 << _p << "WEIGHTS" << endl;
- for (vector<string>::iterator it = print_weights.begin(); it != print_weights.end(); it++) {
- cerr << setw(18) << *it << " = " << lambdas.get(FD::Convert(*it)) << endl;
+ // stats
+ weight_t gold_avg = gold_sum/(weight_t)input_sz;
+ cerr << setiosflags(ios::showpos) << scientific << "WEIGHTS" << endl;
+ for (auto name: print_weights) {
+ cerr << setw(18) << name << " = "
+ << lambdas.get(FD::Convert(name));
+ if (use_adadelta) {
+ weight_t rate = -1.0*(sqrt(update_accum[FD::Convert(name)]+adadelta_eta)
+ / sqrt(gradient_accum[FD::Convert(name)]+adadelta_eta));
+ cerr << " {" << rate << "}";
}
- cerr << " ---" << endl;
- cerr << _np << " 1best avg score: " << score_avg;
- cerr << _p << " (" << score_diff << ")" << endl;
- cerr << _np << " 1best avg model score: " << model_avg;
- cerr << _p << " (" << model_diff << ")" << endl;
- cerr << " avg # pairs: ";
- cerr << _np << npairs/(float)in_sz << endl;
- cerr << " avg # rank err: ";
- cerr << rank_errors/(float)in_sz;
- if (faster_perceptron) cerr << " (meaningless)";
cerr << endl;
- cerr << " avg # margin viol: ";
- cerr << margin_violations/(float)in_sz << endl;
- if (batch) cerr << " batch loss: " << batch_loss << endl;
- cerr << " k-best loss imp: " << ((float)kbest_loss_improve/in_sz)*100 << "%" << endl;
- cerr << " non0 feature count: " << nonz << endl;
- cerr << " avg list sz: " << list_sz/(float)in_sz << endl;
- cerr << " avg f count: " << f_count/(float)list_sz << endl;
- }
-
- pair<score_t,score_t> remember;
- remember.first = score_avg;
- remember.second = model_avg;
- all_scores.push_back(remember);
- if (score_avg > max_score) {
- max_score = score_avg;
- best_it = t;
}
- time (&end);
- float time_diff = difftime(end, start);
- overall_time += time_diff;
- if (!quiet) {
- cerr << _p2 << _np << "(time " << time_diff/60. << " min, ";
- cerr << time_diff/in_sz << " s/S)" << endl;
+ cerr << " ---" << endl;
+ cerr << resetiosflags(ios::showpos)
+ << " 1best avg score: " << gold_avg*100;
+ cerr << setiosflags(ios::showpos) << fixed << " ("
+ << (gold_avg-gold_prev)*100 << ")" << endl;
+ cerr << scientific << " 1best avg model score: "
+ << model_sum/(weight_t)input_sz << endl;
+ cerr << fixed;
+ cerr << " avg # updates: ";
+ cerr << resetiosflags(ios::showpos) << num_up/(float)input_sz << endl;
+ cerr << " non-0 feature count: " << lambdas.num_nonzero() << endl;
+ cerr << " avg f count: " << feature_count/(float)list_sz << endl;
+ cerr << " avg list sz: " << list_sz/(float)input_sz << endl;
+
+ if (gold_avg > best) {
+ best = gold_avg;
+ best_iteration = t;
}
- if (t+1 != T && !quiet) cerr << endl;
+ gold_prev = gold_avg;
- if (noup) break;
+ time (&end);
+ time_t time_diff = difftime(end, start);
+ total_time += time_diff;
+ cerr << "(time " << time_diff/60. << " min, ";
+ cerr << time_diff/input_sz << " s/S)" << endl;
+ if (t+1 != T) cerr << endl;
- // write weights to file
- if (select_weights == "best" || keep) {
+ if (keep) { // keep intermediate weights
lambdas.init_vector(&decoder_weights);
string w_fn = "weights." + boost::lexical_cast<string>(t) + ".gz";
Weights::WriteToFile(w_fn, decoder_weights, true);
}
- if (check) cout << "---" << endl;
-
} // outer loop
- if (average) w_average /= (weight_t)T;
-
- if (!noup) {
- if (!quiet) cerr << endl << "Writing weights file to '" << output_fn << "' ..." << endl;
- if (select_weights == "last" || average) { // last, average
- WriteFile of(output_fn); // works with '-'
- ostream& o = *of.stream();
- o.precision(17);
- o << _np;
- if (average) {
- for (SparseVector<weight_t>::iterator it = w_average.begin(); it != w_average.end(); ++it) {
- if (it->second == 0) continue;
- o << FD::Convert(it->first) << '\t' << it->second << endl;
- }
- } else {
- for (SparseVector<weight_t>::iterator it = lambdas.begin(); it != lambdas.end(); ++it) {
- if (it->second == 0) continue;
- o << FD::Convert(it->first) << '\t' << it->second << endl;
- }
- }
- } else if (select_weights == "VOID") { // do nothing with the weights
- } else { // best
- if (output_fn != "-") {
- CopyFile("weights."+boost::lexical_cast<string>(best_it)+".gz", output_fn);
- } else {
- ReadFile bestw("weights."+boost::lexical_cast<string>(best_it)+".gz");
- string o;
- cout.precision(17);
- cout << _np;
- while(getline(*bestw, o)) cout << o << endl;
- }
- if (!keep) {
- for (unsigned i = 0; i < T; i++) {
- string s = "weights." + boost::lexical_cast<string>(i) + ".gz";
- unlink(s.c_str());
- }
- }
- }
- if (!quiet) cerr << "done" << endl;
+ // final weights
+ if (average) {
+ w_average /= T;
+ w_average.init_vector(decoder_weights);
+ } else if (!keep) {
+ lambdas.init_vector(decoder_weights);
}
+ if (average || !keep)
+ Weights::WriteToFile(output_fn, decoder_weights, true);
- if (!quiet) {
- cerr << _p5 << _np << endl << "---" << endl << "Best iteration: ";
- cerr << best_it+1 << " [SCORE '" << scorer_str << "'=" << max_score << "]." << endl;
- cerr << "This took " << overall_time/60. << " min." << endl;
- }
+ cerr << endl << "---" << endl << "Best iteration: ";
+ cerr << best_iteration+1 << " [GOLD = " << best*100 << "]." << endl;
+ cerr << "This took " << total_time/60. << " min." << endl;
+
+ return 0;
}
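
[Editor's note on the optimizer code added in dtrain.cc above.] The `use_adadelta` branch keeps two per-coordinate accumulators (`gradient_accum`, `update_accum`) and follows Zeiler's (2012) AdaDelta recurrences, with `adadelta_decay` as the decay $\rho$, the hard-coded `adadelta_eta` = 1e-6 as the smoothing constant $\epsilon$, and the pairwise update vector $u_t$ standing in for the gradient:

$$E[g^2]_t = \rho\, E[g^2]_{t-1} + (1-\rho)\, u_t^2$$
$$\Delta w_t = -\frac{\sqrt{E[\Delta w^2]_{t-1} + \epsilon}}{\sqrt{E[g^2]_t + \epsilon}}\; u_t$$
$$E[\Delta w^2]_t = \rho\, E[\Delta w^2]_{t-1} + (1-\rho)\, (\Delta w_t)^2$$

The $\ell_1$ branch applies clipped regularization in the style of Tsuruoka, Tsujii and Ananiadou (2009), cited in the option help, and, as the code comment notes, only to features actually touched by the current sentence:

$$w_i \leftarrow \begin{cases} \max(0,\; w_i - \lambda) & \text{if } w_i > 0 \\ \min(0,\; w_i + \lambda) & \text{if } w_i < 0 \end{cases}$$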
diff --git a/training/dtrain/dtrain.h b/training/dtrain/dtrain.h
index 07bd9b65..883e6028 100644
--- a/training/dtrain/dtrain.h
+++ b/training/dtrain/dtrain.h
@@ -1,9 +1,6 @@
#ifndef _DTRAIN_H_
#define _DTRAIN_H_
-#define DTRAIN_DOTS 10 // after how many inputs to display a '.'
-#define DTRAIN_SCALE 100000
-
#include <iomanip>
#include <climits>
#include <string.h>
@@ -25,113 +22,125 @@ namespace po = boost::program_options;
namespace dtrain
{
-
-inline void register_and_convert(const vector<string>& strs, vector<WordID>& ids)
-{
- vector<string>::const_iterator it;
- for (it = strs.begin(); it < strs.end(); it++)
- ids.push_back(TD::Convert(*it));
-}
-
-inline string gettmpf(const string path, const string infix)
-{
- char fn[path.size() + infix.size() + 8];
- strcpy(fn, path.c_str());
- strcat(fn, "/");
- strcat(fn, infix.c_str());
- strcat(fn, "-XXXXXX");
- if (!mkstemp(fn)) {
- cerr << "Cannot make temp file in" << path << " , exiting." << endl;
- exit(1);
- }
- return string(fn);
-}
-
-typedef double score_t;
-
-struct ScoredHyp
+struct Hyp
{
- vector<WordID> w;
- SparseVector<double> f;
- score_t model;
- score_t score;
- unsigned rank;
+ Hyp() {}
+ Hyp(vector<WordID> w, SparseVector<weight_t> f, weight_t model, weight_t gold,
+ size_t rank) : w(w), f(f), model(model), gold(gold), rank(rank) {}
+
+ vector<WordID> w;
+ SparseVector<weight_t> f;
+ weight_t model, gold;
+ size_t rank;
};
-struct LocalScorer
+bool
+dtrain_init(int argc,
+ char** argv,
+ po::variables_map* conf)
{
- unsigned N_;
- vector<score_t> w_;
-
- virtual score_t
- Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned rank, const unsigned src_len)=0;
-
- virtual void Reset() {} // only for ApproxBleuScorer, LinearBleuScorer
-
- inline void
- Init(unsigned N, vector<score_t> weights)
- {
- assert(N > 0);
- N_ = N;
- if (weights.empty()) for (unsigned i = 0; i < N_; i++) w_.push_back(1./N_);
- else w_ = weights;
+ po::options_description opts("Configuration File Options");
+ opts.add_options()
+ ("bitext,b", po::value<string>(),
+ "bitext, source and references in a single file [e ||| f]")
+ ("decoder_conf,C", po::value<string>(),
+ "decoder configuration file")
+ ("iterations,T", po::value<size_t>()->default_value(15),
+ "number of iterations T")
+ ("k", po::value<size_t>()->default_value(100),
+ "sample size per input (e.g. size of k-best lists)")
+ ("unique_kbest", po::bool_switch()->default_value(true),
+ "unique k-best lists")
+ ("forest_sample", po::bool_switch()->default_value(false),
+ "sample k hyptheses from forest instead of using k-best list")
+ ("learning_rate,l", po::value<weight_t>()->default_value(0.00001),
+ "learning rate [only meaningful if margin>0 or input weights are given]")
+ ("l1_reg,r", po::value<weight_t>()->default_value(0.),
+ "l1 regularization strength [see Tsuruoka, Tsujii and Ananiadou (2009)]")
+ ("adadelta,D", po::bool_switch()->default_value(false),
+ "use AdaDelta dynamic learning rates")
+ ("adadelta_decay", po::value<weight_t>()->default_value(0.9),
+ "decay for AdaDelta algorithm")
+ ("adadelta_input", po::value<string>()->default_value(""),
+ "input for AdaDelta's parameters, two files: file.gradient, and file.update")
+ ("adadelta_output", po::value<string>()->default_value(""),
+ "prefix for outputting AdaDelta's parameters")
+ ("margin,m", po::value<weight_t>()->default_value(1.0),
+ "margin for margin perceptron [set =0 for standard perceptron]")
+ ("cut,u", po::value<weight_t>()->default_value(0.1),
+ "use top/bottom 10% (default) of k-best as 'good' and 'bad' for pair sampling, 0 to use all pairs")
+ ("adjust,A", po::bool_switch()->default_value(false),
+ "adjust cut for optimal pos. in k-best to cut")
+ ("score,s", po::value<string>()->default_value("nakov"),
+ "per-sentence BLEU (approx.)")
+ ("nakov_fix", po::value<weight_t>()->default_value(1.0),
+ "add to reference length [see score.h]")
+ ("chiang_decay", po::value<weight_t>()->default_value(0.9),
+ "decaying factor for Chiang's approx. BLEU")
+ ("N", po::value<size_t>()->default_value(4),
+ "N for BLEU approximation")
+ ("input_weights,w", po::value<string>(),
+ "weights to initialize model")
+ ("average,a", po::bool_switch()->default_value(true),
+ "output average weights")
+ ("keep,K", po::bool_switch()->default_value(false),
+ "output a weight file per iteration [as weights.T.gz]")
+ ("structured,S", po::bool_switch()->default_value(false),
+ "structured prediction objective [hope/fear] w/ SGD")
+ ("pro_sampling", po::bool_switch()->default_value(false),
+ "updates from pairs selected as shown in Fig.4 of (Hopkins and May, 2011) [Gamma=max_pairs (default 5000), Xi=cut (default 50); threshold default 0.05]")
+ ("threshold", po::value<weight_t>()->default_value(0.),
+ "(min.) threshold in terms of gold score for pair selection")
+ ("max_pairs",
+ po::value<size_t>()->default_value(numeric_limits<size_t>::max()),
+ "max. number of updates/pairs")
+ ("batch,B", po::bool_switch()->default_value(false),
+ "perform batch updates")
+ ("output,o", po::value<string>()->default_value("-"),
+ "output weights file, '-' for STDOUT")
+ ("disable_learning,X", po::bool_switch()->default_value(false),
+ "fix model")
+ ("output_updates,U", po::value<string>()->default_value(""),
+ "output updates (diff. vectors) [to filename]")
+ ("output_raw,R", po::value<string>()->default_value(""),
+ "output raw data (e.g. k-best lists) [to filename]")
+ ("stop_after", po::value<size_t>()->default_value(numeric_limits<size_t>::max()),
+ "only look at this number of segments")
+ ("print_weights,P", po::value<string>()->default_value("EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV"),
+ "list of weights to print after each iteration");
+ po::options_description clopts("Command Line Options");
+ clopts.add_options()
+ ("conf,c", po::value<string>(), "dtrain configuration file")
+ ("help,h", po::bool_switch(), "display options");
+ opts.add(clopts);
+ po::store(parse_command_line(argc, argv, opts), *conf);
+ cerr << "*dtrain*" << endl << endl;
+ if ((*conf)["help"].as<bool>()) {
+ cerr << setprecision(3) << opts << endl;
+
+ return false;
}
-
- inline score_t
- brevity_penalty(const unsigned hyp_len, const unsigned ref_len)
- {
- if (hyp_len > ref_len) return 1;
- return exp(1 - (score_t)ref_len/hyp_len);
+ if (conf->count("conf")) {
+ ifstream f((*conf)["conf"].as<string>().c_str());
+ po::store(po::parse_config_file(f, opts), *conf);
}
-};
-
-struct HypSampler : public DecoderObserver
-{
- LocalScorer* scorer_;
- vector<WordID>* ref_;
- unsigned f_count_, sz_;
- virtual vector<ScoredHyp>* GetSamples()=0;
- inline void SetScorer(LocalScorer* scorer) { scorer_ = scorer; }
- inline void SetRef(vector<WordID>& ref) { ref_ = &ref; }
- inline unsigned get_f_count() { return f_count_; }
- inline unsigned get_sz() { return sz_; }
-};
-
-struct HSReporter
-{
- string task_id_;
-
- HSReporter(string task_id) : task_id_(task_id) {}
+ po::notify(*conf);
+ if (!conf->count("decoder_conf")) {
+ cerr << "Missing decoder configuration." << endl;
+ cerr << opts << endl;
- inline void update_counter(string name, unsigned amount) {
- cerr << "reporter:counter:" << task_id_ << "," << name << "," << amount << endl;
+ return false;
}
- inline void update_gcounter(string name, unsigned amount) {
- cerr << "reporter:counter:Global," << name << "," << amount << endl;
- }
-};
+ if (!conf->count("bitext")) {
+ cerr << "No input bitext." << endl;
+ cerr << opts << endl;
-inline ostream& _np(ostream& out) { return out << resetiosflags(ios::showpos); }
-inline ostream& _p(ostream& out) { return out << setiosflags(ios::showpos); }
-inline ostream& _p2(ostream& out) { return out << setprecision(2); }
-inline ostream& _p5(ostream& out) { return out << setprecision(5); }
-
-inline void printWordIDVec(vector<WordID>& v, ostream& os=cerr)
-{
- for (unsigned i = 0; i < v.size(); i++) {
- os << TD::Convert(v[i]);
- if (i < v.size()-1) os << " ";
+ return false;
}
-}
-template<typename T>
-inline T sign(T z)
-{
- if (z == 0) return 0;
- return z < 0 ? -1 : +1;
+ return true;
}
-
} // namespace
#endif
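
[Editor's note.] The pair generation and update rules themselves moved into update.h, which is not shown in this diff. Purely as an illustration, here is a minimal sketch of what a multipartite margin-perceptron update over a gold-scored k-best list looks like, assuming the semantics documented for the `cut` and `margin` options above; this is not the real `updates_multipartite` and uses a plain map as a stand-in for cdec's SparseVector:

```
// Sketch only: multipartite margin-perceptron updates over a k-best sample,
// assuming the 'cut'/'margin' option semantics; update.h is not in this diff.
#include <algorithm>
#include <cstddef>
#include <map>
#include <vector>

typedef double weight_t;
typedef std::map<int, weight_t> Feats; // stand-in for cdec's SparseVector

struct Hyp { Feats f; weight_t model, gold; };

// Sort by gold score; the top cut*k hypotheses form the 'good' part, the
// bottom cut*k the 'bad' part. Update whenever a good hypothesis does not
// outscore a bad one by at least 'margin' under the current model.
size_t updates_multipartite_sketch(std::vector<Hyp> sample, Feats& updates,
                                   weight_t cut, weight_t margin)
{
  std::sort(sample.begin(), sample.end(),
            [](const Hyp& a, const Hyp& b) { return a.gold > b.gold; });
  size_t k = sample.size();
  size_t sz = std::max<size_t>(1, (size_t)(cut*k));
  if (2*sz > k) sz = k/2; // keep 'good' and 'bad' parts disjoint
  size_t num_up = 0;
  for (size_t i = 0; i < sz; i++) {      // 'good' part
    for (size_t j = k-sz; j < k; j++) {  // 'bad' part
      if (sample[i].model - sample[j].model > margin)
        continue; // correctly ranked with sufficient margin
      for (auto& it: sample[i].f) updates[it.first] += it.second;
      for (auto& it: sample[j].f) updates[it.first] -= it.second;
      num_up++;
    }
  }
  return num_up; // caller scales the accumulated vector by eta
}
```

This corresponds to the default branch in dtrain.cc above (`cut` > 0, neither `pro_sampling` nor `structured`), where the returned count feeds `num_up` and the accumulated vector is either scaled by the learning rate via plus_eq_v_times_s or handed to the AdaDelta branch.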
diff --git a/training/dtrain/examples/parallelized/README b/training/dtrain/examples/parallelized/README
deleted file mode 100644
index 89715105..00000000
--- a/training/dtrain/examples/parallelized/README
+++ /dev/null
@@ -1,5 +0,0 @@
-run for example
- ../../parallelize.rb ./dtrain.ini 4 false 2 2 ./in ./refs
-
-final weights will be in the file work/weights.3
-
diff --git a/training/dtrain/examples/parallelized/cdec.ini b/training/dtrain/examples/parallelized/cdec.ini
deleted file mode 100644
index 5773029a..00000000
--- a/training/dtrain/examples/parallelized/cdec.ini
+++ /dev/null
@@ -1,22 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
-intersection_strategy=cube_pruning
-cubepruning_pop_limit=200
-scfg_max_span_limit=15
-feature_function=WordPenalty
-feature_function=KLanguageModel ../standard//nc-wmt11.en.srilm.gz
-#feature_function=ArityPenalty
-#feature_function=CMR2008ReorderingFeatures
-#feature_function=Dwarf
-#feature_function=InputIndicator
-#feature_function=LexNullJump
-#feature_function=NewJump
-#feature_function=NgramFeatures
-#feature_function=NonLatinCount
-#feature_function=OutputIndicator
-#feature_function=RuleIdentityFeatures
-#feature_function=RuleNgramFeatures
-#feature_function=RuleShape
-#feature_function=SourceSpanSizeFeatures
-#feature_function=SourceWordPenalty
-#feature_function=SpanFeatures
diff --git a/training/dtrain/examples/parallelized/dtrain.ini b/training/dtrain/examples/parallelized/dtrain.ini
deleted file mode 100644
index 0b0932d6..00000000
--- a/training/dtrain/examples/parallelized/dtrain.ini
+++ /dev/null
@@ -1,14 +0,0 @@
-k=100
-N=4
-learning_rate=0.0001
-gamma=0
-loss_margin=1.0
-epochs=1
-scorer=stupid_bleu
-sample_from=kbest
-filter=uniq
-pair_sampling=XYX
-hi_lo=0.1
-select_weights=last
-print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-decoder_config=cdec.ini
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.0.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.0.gz
deleted file mode 100644
index 1e28a24b..00000000
--- a/training/dtrain/examples/parallelized/grammar/grammar.out.0.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.1.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.1.gz
deleted file mode 100644
index 372f5675..00000000
--- a/training/dtrain/examples/parallelized/grammar/grammar.out.1.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.2.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.2.gz
deleted file mode 100644
index 145d0dc0..00000000
--- a/training/dtrain/examples/parallelized/grammar/grammar.out.2.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.3.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.3.gz
deleted file mode 100644
index 105593ff..00000000
--- a/training/dtrain/examples/parallelized/grammar/grammar.out.3.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.4.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.4.gz
deleted file mode 100644
index 30781f48..00000000
--- a/training/dtrain/examples/parallelized/grammar/grammar.out.4.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.5.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.5.gz
deleted file mode 100644
index 834ee759..00000000
--- a/training/dtrain/examples/parallelized/grammar/grammar.out.5.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.6.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.6.gz
deleted file mode 100644
index 2e76f348..00000000
--- a/training/dtrain/examples/parallelized/grammar/grammar.out.6.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.7.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.7.gz
deleted file mode 100644
index 3741a887..00000000
--- a/training/dtrain/examples/parallelized/grammar/grammar.out.7.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.8.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.8.gz
deleted file mode 100644
index ebf6bd0c..00000000
--- a/training/dtrain/examples/parallelized/grammar/grammar.out.8.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.9.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.9.gz
deleted file mode 100644
index c1791059..00000000
--- a/training/dtrain/examples/parallelized/grammar/grammar.out.9.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/parallelized/in b/training/dtrain/examples/parallelized/in
deleted file mode 100644
index 51d01fe7..00000000
--- a/training/dtrain/examples/parallelized/in
+++ /dev/null
@@ -1,10 +0,0 @@
-<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg>
-<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg>
-<seg grammar="grammar/grammar.out.2.gz" id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg>
-<seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg>
-<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg>
-<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg>
-<seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg>
-<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg>
-<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg>
-<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg>
diff --git a/training/dtrain/examples/parallelized/refs b/training/dtrain/examples/parallelized/refs
deleted file mode 100644
index 632e27b0..00000000
--- a/training/dtrain/examples/parallelized/refs
+++ /dev/null
@@ -1,10 +0,0 @@
-europe 's divided racial house
-a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge .
-the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them .
-while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon .
-an aging population at home and ever more open borders imply increasing racial fragmentation in european countries .
-mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear .
-it will not , as america 's racial history clearly shows .
-race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes .
-the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths .
-this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us .
diff --git a/training/dtrain/examples/parallelized/work/out.0.0 b/training/dtrain/examples/parallelized/work/out.0.0
deleted file mode 100644
index c559dd4d..00000000
--- a/training/dtrain/examples/parallelized/work/out.0.0
+++ /dev/null
@@ -1,62 +0,0 @@
- cdec cfg 'cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
-Seeding random number sequence to 405292278
-
-dtrain
-Parameters:
- k 100
- N 4
- T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
- input 'work/shard.0.0.in'
- refs 'work/shard.0.0.refs'
- output 'work/weights.0.0'
-(a dot represents 10 inputs)
-Iteration #1 of 1.
- 5
-WEIGHTS
- Glue = +0.2663
- WordPenalty = -0.0079042
- LanguageModel = +0.44782
- LanguageModel_OOV = -0.0401
- PhraseModel_0 = -0.193
- PhraseModel_1 = +0.71321
- PhraseModel_2 = +0.85196
- PhraseModel_3 = -0.43986
- PhraseModel_4 = -0.44803
- PhraseModel_5 = -0.0538
- PhraseModel_6 = -0.1788
- PassThrough = -0.1477
- ---
- 1best avg score: 0.17521 (+0.17521)
- 1best avg model score: 21.556 (+21.556)
- avg # pairs: 1671.2
- avg # rank err: 1118.6
- avg # margin viol: 552.6
- non0 feature count: 12
- avg list sz: 100
- avg f count: 11.32
-(time 0.35 min, 4.2 s/S)
-
-Writing weights file to 'work/weights.0.0' ...
-done
-
----
-Best iteration: 1 [SCORE 'stupid_bleu'=0.17521].
-This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.0.1 b/training/dtrain/examples/parallelized/work/out.0.1
deleted file mode 100644
index 8bc7ea9c..00000000
--- a/training/dtrain/examples/parallelized/work/out.0.1
+++ /dev/null
@@ -1,63 +0,0 @@
- cdec cfg 'cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
-Seeding random number sequence to 43859692
-
-dtrain
-Parameters:
- k 100
- N 4
- T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
- input 'work/shard.0.0.in'
- refs 'work/shard.0.0.refs'
- output 'work/weights.0.1'
- weights in 'work/weights.0'
-(a dot represents 10 inputs)
-Iteration #1 of 1.
- 5
-WEIGHTS
- Glue = -0.2699
- WordPenalty = +0.080605
- LanguageModel = -0.026572
- LanguageModel_OOV = -0.30025
- PhraseModel_0 = -0.32076
- PhraseModel_1 = +0.67451
- PhraseModel_2 = +0.92
- PhraseModel_3 = -0.36402
- PhraseModel_4 = -0.592
- PhraseModel_5 = -0.0269
- PhraseModel_6 = -0.28755
- PassThrough = -0.33285
- ---
- 1best avg score: 0.26638 (+0.26638)
- 1best avg model score: 53.197 (+53.197)
- avg # pairs: 2028.6
- avg # rank err: 998.2
- avg # margin viol: 918.8
- non0 feature count: 12
- avg list sz: 100
- avg f count: 10.496
-(time 0.35 min, 4.2 s/S)
-
-Writing weights file to 'work/weights.0.1' ...
-done
-
----
-Best iteration: 1 [SCORE 'stupid_bleu'=0.26638].
-This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.0 b/training/dtrain/examples/parallelized/work/out.1.0
deleted file mode 100644
index 65d1e7dc..00000000
--- a/training/dtrain/examples/parallelized/work/out.1.0
+++ /dev/null
@@ -1,62 +0,0 @@
- cdec cfg 'cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
-Seeding random number sequence to 4126799437
-
-dtrain
-Parameters:
- k 100
- N 4
- T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
- input 'work/shard.1.0.in'
- refs 'work/shard.1.0.refs'
- output 'work/weights.1.0'
-(a dot represents 10 inputs)
-Iteration #1 of 1.
- 5
-WEIGHTS
- Glue = -0.3815
- WordPenalty = +0.20064
- LanguageModel = +0.95304
- LanguageModel_OOV = -0.264
- PhraseModel_0 = -0.22362
- PhraseModel_1 = +0.12254
- PhraseModel_2 = +0.26328
- PhraseModel_3 = +0.38018
- PhraseModel_4 = -0.48654
- PhraseModel_5 = +0
- PhraseModel_6 = -0.3645
- PassThrough = -0.2216
- ---
- 1best avg score: 0.10863 (+0.10863)
- 1best avg model score: -4.9841 (-4.9841)
- avg # pairs: 1345.4
- avg # rank err: 822.4
- avg # margin viol: 501
- non0 feature count: 11
- avg list sz: 100
- avg f count: 11.814
-(time 0.43 min, 5.2 s/S)
-
-Writing weights file to 'work/weights.1.0' ...
-done
-
----
-Best iteration: 1 [SCORE 'stupid_bleu'=0.10863].
-This took 0.43333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.1 b/training/dtrain/examples/parallelized/work/out.1.1
deleted file mode 100644
index f479fbbc..00000000
--- a/training/dtrain/examples/parallelized/work/out.1.1
+++ /dev/null
@@ -1,63 +0,0 @@
- cdec cfg 'cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
-Seeding random number sequence to 2112412848
-
-dtrain
-Parameters:
- k 100
- N 4
- T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
- input 'work/shard.1.0.in'
- refs 'work/shard.1.0.refs'
- output 'work/weights.1.1'
- weights in 'work/weights.0'
-(a dot represents 10 inputs)
-Iteration #1 of 1.
- 5
-WEIGHTS
- Glue = -0.3178
- WordPenalty = +0.11092
- LanguageModel = +0.17269
- LanguageModel_OOV = -0.13485
- PhraseModel_0 = -0.45371
- PhraseModel_1 = +0.38789
- PhraseModel_2 = +0.75311
- PhraseModel_3 = -0.38163
- PhraseModel_4 = -0.58817
- PhraseModel_5 = -0.0269
- PhraseModel_6 = -0.27315
- PassThrough = -0.16745
- ---
- 1best avg score: 0.13169 (+0.13169)
- 1best avg model score: 24.226 (+24.226)
- avg # pairs: 1951.2
- avg # rank err: 985.4
- avg # margin viol: 951
- non0 feature count: 12
- avg list sz: 100
- avg f count: 11.224
-(time 0.45 min, 5.4 s/S)
-
-Writing weights file to 'work/weights.1.1' ...
-done
-
----
-Best iteration: 1 [SCORE 'stupid_bleu'=0.13169].
-This took 0.45 min.
diff --git a/training/dtrain/examples/parallelized/work/shard.0.0.in b/training/dtrain/examples/parallelized/work/shard.0.0.in
deleted file mode 100644
index 92f9c78e..00000000
--- a/training/dtrain/examples/parallelized/work/shard.0.0.in
+++ /dev/null
@@ -1,5 +0,0 @@
-<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg>
-<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg>
-<seg grammar="grammar/grammar.out.2.gz" id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg>
-<seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg>
-<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg>
diff --git a/training/dtrain/examples/parallelized/work/shard.0.0.refs b/training/dtrain/examples/parallelized/work/shard.0.0.refs
deleted file mode 100644
index bef68fee..00000000
--- a/training/dtrain/examples/parallelized/work/shard.0.0.refs
+++ /dev/null
@@ -1,5 +0,0 @@
-europe 's divided racial house
-a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge .
-the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them .
-while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon .
-an aging population at home and ever more open borders imply increasing racial fragmentation in european countries .
diff --git a/training/dtrain/examples/parallelized/work/shard.1.0.in b/training/dtrain/examples/parallelized/work/shard.1.0.in
deleted file mode 100644
index b7695ce7..00000000
--- a/training/dtrain/examples/parallelized/work/shard.1.0.in
+++ /dev/null
@@ -1,5 +0,0 @@
-<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg>
-<seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg>
-<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg>
-<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg>
-<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg>
diff --git a/training/dtrain/examples/parallelized/work/shard.1.0.refs b/training/dtrain/examples/parallelized/work/shard.1.0.refs
deleted file mode 100644
index 6076f6d5..00000000
--- a/training/dtrain/examples/parallelized/work/shard.1.0.refs
+++ /dev/null
@@ -1,5 +0,0 @@
-mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear .
-it will not , as america 's racial history clearly shows .
-race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes .
-the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths .
-this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us .
diff --git a/training/dtrain/examples/parallelized/work/weights.0 b/training/dtrain/examples/parallelized/work/weights.0
deleted file mode 100644
index ddd595a8..00000000
--- a/training/dtrain/examples/parallelized/work/weights.0
+++ /dev/null
@@ -1,12 +0,0 @@
-LanguageModel 0.7004298992212881
-PhraseModel_2 0.5576194336478857
-PhraseModel_1 0.41787318415343155
-PhraseModel_4 -0.46728502545635164
-PhraseModel_3 -0.029839521598455515
-Glue -0.05760000000000068
-PhraseModel_6 -0.2716499999999978
-PhraseModel_0 -0.20831031065605327
-LanguageModel_OOV -0.15205000000000077
-PassThrough -0.1846500000000006
-WordPenalty 0.09636994553433414
-PhraseModel_5 -0.026900000000000257
diff --git a/training/dtrain/examples/parallelized/work/weights.0.0 b/training/dtrain/examples/parallelized/work/weights.0.0
deleted file mode 100644
index c9370b18..00000000
--- a/training/dtrain/examples/parallelized/work/weights.0.0
+++ /dev/null
@@ -1,12 +0,0 @@
-WordPenalty -0.0079041595706392243
-LanguageModel 0.44781580828279532
-LanguageModel_OOV -0.04010000000000042
-Glue 0.26629999999999948
-PhraseModel_0 -0.19299677809125185
-PhraseModel_1 0.71321026861732773
-PhraseModel_2 0.85195540993310537
-PhraseModel_3 -0.43986310822842656
-PhraseModel_4 -0.44802855630415955
-PhraseModel_5 -0.053800000000000514
-PhraseModel_6 -0.17879999999999835
-PassThrough -0.14770000000000036
diff --git a/training/dtrain/examples/parallelized/work/weights.0.1 b/training/dtrain/examples/parallelized/work/weights.0.1
deleted file mode 100644
index 8fad3de8..00000000
--- a/training/dtrain/examples/parallelized/work/weights.0.1
+++ /dev/null
@@ -1,12 +0,0 @@
-WordPenalty 0.080605055841244472
-LanguageModel -0.026571720531022844
-LanguageModel_OOV -0.30024999999999141
-Glue -0.26989999999999842
-PhraseModel_2 0.92000295209089566
-PhraseModel_1 0.67450748692470841
-PhraseModel_4 -0.5920000014976784
-PhraseModel_3 -0.36402437203127397
-PhraseModel_6 -0.28754999999999603
-PhraseModel_0 -0.32076244202907672
-PassThrough -0.33284999999999004
-PhraseModel_5 -0.026900000000000257
diff --git a/training/dtrain/examples/parallelized/work/weights.1 b/training/dtrain/examples/parallelized/work/weights.1
deleted file mode 100644
index 03058a16..00000000
--- a/training/dtrain/examples/parallelized/work/weights.1
+++ /dev/null
@@ -1,12 +0,0 @@
-PhraseModel_2 0.8365578543552836
-PhraseModel_4 -0.5900840266009169
-PhraseModel_1 0.5312000609786991
-PhraseModel_0 -0.3872342271319619
-PhraseModel_3 -0.3728279676912084
-Glue -0.2938500000000036
-PhraseModel_6 -0.2803499999999967
-PassThrough -0.25014999999999626
-LanguageModel_OOV -0.21754999999999702
-LanguageModel 0.07306061161169894
-WordPenalty 0.09576193325966899
-PhraseModel_5 -0.026900000000000257
diff --git a/training/dtrain/examples/parallelized/work/weights.1.0 b/training/dtrain/examples/parallelized/work/weights.1.0
deleted file mode 100644
index 6a6a65c1..00000000
--- a/training/dtrain/examples/parallelized/work/weights.1.0
+++ /dev/null
@@ -1,11 +0,0 @@
-WordPenalty 0.20064405063930751
-LanguageModel 0.9530439901597807
-LanguageModel_OOV -0.26400000000000112
-Glue -0.38150000000000084
-PhraseModel_0 -0.22362384322085468
-PhraseModel_1 0.12253609968953538
-PhraseModel_2 0.26328345736266612
-PhraseModel_3 0.38018406503151553
-PhraseModel_4 -0.48654149460854373
-PhraseModel_6 -0.36449999999999722
-PassThrough -0.22160000000000085
diff --git a/training/dtrain/examples/parallelized/work/weights.1.1 b/training/dtrain/examples/parallelized/work/weights.1.1
deleted file mode 100644
index f56ea4a2..00000000
--- a/training/dtrain/examples/parallelized/work/weights.1.1
+++ /dev/null
@@ -1,12 +0,0 @@
-WordPenalty 0.1109188106780935
-LanguageModel 0.17269294375442074
-LanguageModel_OOV -0.13485000000000266
-Glue -0.3178000000000088
-PhraseModel_2 0.75311275661967159
-PhraseModel_1 0.38789263503268989
-PhraseModel_4 -0.58816805170415531
-PhraseModel_3 -0.38163156335114284
-PhraseModel_6 -0.27314999999999739
-PhraseModel_0 -0.45370601223484697
-PassThrough -0.16745000000000249
-PhraseModel_5 -0.026900000000000257
diff --git a/training/dtrain/examples/standard/README b/training/dtrain/examples/standard/README
deleted file mode 100644
index ce37d31a..00000000
--- a/training/dtrain/examples/standard/README
+++ /dev/null
@@ -1,2 +0,0 @@
-Call `dtrain` from this folder with ../../dtrain -c dtrain.ini .
-
diff --git a/training/dtrain/examples/standard/cdec.ini b/training/dtrain/examples/standard/cdec.ini
deleted file mode 100644
index 3330dd71..00000000
--- a/training/dtrain/examples/standard/cdec.ini
+++ /dev/null
@@ -1,27 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
-scfg_max_span_limit=15
-intersection_strategy=cube_pruning
-cubepruning_pop_limit=200
-grammar=nc-wmt11.grammar.gz
-feature_function=WordPenalty
-feature_function=KLanguageModel ./nc-wmt11.en.srilm.gz
-# all currently working feature functions for translation:
-# (with those features active that were used in the ACL paper)
-#feature_function=ArityPenalty
-#feature_function=CMR2008ReorderingFeatures
-#feature_function=Dwarf
-#feature_function=InputIndicator
-#feature_function=LexNullJump
-#feature_function=NewJump
-#feature_function=NgramFeatures
-#feature_function=NonLatinCount
-#feature_function=OutputIndicator
-feature_function=RuleIdentityFeatures
-feature_function=RuleSourceBigramFeatures
-feature_function=RuleTargetBigramFeatures
-feature_function=RuleShape
-feature_function=LexicalFeatures 1 1 1
-#feature_function=SourceSpanSizeFeatures
-#feature_function=SourceWordPenalty
-#feature_function=SpanFeatures
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
deleted file mode 100644
index a515db02..00000000
--- a/training/dtrain/examples/standard/dtrain.ini
+++ /dev/null
@@ -1,27 +0,0 @@
-#input=./nc-wmt11.de.gz
-#refs=./nc-wmt11.en.gz
-bitext=./nc-wmt11.gz
-output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
-select_weights=avg # output average (over epochs) weight vector
-decoder_config=./cdec.ini # config for cdec
-# weights for these features will be printed on each iteration
-print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-# newer version of the grammar extractor use different feature names:
-#print_weights= EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV
-stop_after=10 # stop epoch after 10 inputs
-
-# interesting stuff
-epochs=3 # run over input 3 times
-k=100 # use 100best lists
-N=4 # optimize (approx) BLEU4
-scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=0.1 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
-gamma=0 # use SVM reg
-sample_from=kbest # use kbest lists (as opposed to forest)
-filter=uniq # only unique entries in kbest (surface form)
-pair_sampling=XYX #
-hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
-pair_threshold=0 # minimum distance in BLEU (here: > 0)
-loss_margin=0 # update if correctly ranked, but within this margin
-repeat=1 # repeat training on a kbest list 1 times
-#batch=true # batch tuning, update after accumulating over all sentences and all kbest lists
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
deleted file mode 100644
index 2460cfbb..00000000
--- a/training/dtrain/examples/standard/expected-output
+++ /dev/null
@@ -1,123 +0,0 @@
- cdec cfg './cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading ./nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
- Example feature: Shape_S00000_T00000
-T=1 I=1 D=1
-Seeding random number sequence to 2327685089
-
-dtrain
-Parameters:
- k 100
- N 4
- T 3
- batch 0
- scorer 'fixed_stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 0.1
- gamma 0
- loss margin 0
- faster perceptron 1
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'avg'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg './cdec.ini'
- input './nc-wmt11.gz'
- output '-'
- stop_after 10
-(a dot represents 10 inputs)
-Iteration #1 of 3.
- . 10
-Stopping after 10 input sentences.
-WEIGHTS
- Glue = +6.9
- WordPenalty = -46.426
- LanguageModel = +535.12
- LanguageModel_OOV = -123.5
- PhraseModel_0 = -160.73
- PhraseModel_1 = -350.13
- PhraseModel_2 = -187.81
- PhraseModel_3 = +172.04
- PhraseModel_4 = +0.90108
- PhraseModel_5 = +21.6
- PhraseModel_6 = +67.2
- PassThrough = -149.7
- ---
- 1best avg score: 0.23327 (+0.23327)
- 1best avg model score: -9084.9 (-9084.9)
- avg # pairs: 780.7
- avg # rank err: 0 (meaningless)
- avg # margin viol: 0
- k-best loss imp: 100%
- non0 feature count: 1389
- avg list sz: 91.3
- avg f count: 146.2
-(time 0.37 min, 2.2 s/S)
-
-Iteration #2 of 3.
- . 10
-WEIGHTS
- Glue = -43
- WordPenalty = -22.019
- LanguageModel = +591.53
- LanguageModel_OOV = -252.1
- PhraseModel_0 = -120.21
- PhraseModel_1 = -43.589
- PhraseModel_2 = +73.53
- PhraseModel_3 = +113.7
- PhraseModel_4 = -223.81
- PhraseModel_5 = +64
- PhraseModel_6 = +54.8
- PassThrough = -331.1
- ---
- 1best avg score: 0.29568 (+0.062413)
- 1best avg model score: -15879 (-6794.1)
- avg # pairs: 566.1
- avg # rank err: 0 (meaningless)
- avg # margin viol: 0
- k-best loss imp: 100%
- non0 feature count: 1931
- avg list sz: 91.3
- avg f count: 139.89
-(time 0.33 min, 2 s/S)
-
-Iteration #3 of 3.
- . 10
-WEIGHTS
- Glue = -44.3
- WordPenalty = -131.85
- LanguageModel = +230.91
- LanguageModel_OOV = -285.4
- PhraseModel_0 = -194.27
- PhraseModel_1 = -294.83
- PhraseModel_2 = -92.043
- PhraseModel_3 = -140.24
- PhraseModel_4 = +85.613
- PhraseModel_5 = +238.1
- PhraseModel_6 = +158.7
- PassThrough = -359.6
- ---
- 1best avg score: 0.37375 (+0.078067)
- 1best avg model score: -14519 (+1359.7)
- avg # pairs: 545.4
- avg # rank err: 0 (meaningless)
- avg # margin viol: 0
- k-best loss imp: 100%
- non0 feature count: 2218
- avg list sz: 91.3
- avg f count: 137.77
-(time 0.35 min, 2.1 s/S)
-
-Writing weights file to '-' ...
-done
-
----
-Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.37375].
-This took 1.05 min.
diff --git a/training/dtrain/examples/standard/nc-wmt11.de.gz b/training/dtrain/examples/standard/nc-wmt11.de.gz
deleted file mode 100644
index 0741fd92..00000000
--- a/training/dtrain/examples/standard/nc-wmt11.de.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/standard/nc-wmt11.en.gz b/training/dtrain/examples/standard/nc-wmt11.en.gz
deleted file mode 100644
index 1c0bd401..00000000
--- a/training/dtrain/examples/standard/nc-wmt11.en.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/standard/nc-wmt11.en.srilm.gz b/training/dtrain/examples/standard/nc-wmt11.en.srilm.gz
deleted file mode 100644
index 7ce81057..00000000
--- a/training/dtrain/examples/standard/nc-wmt11.en.srilm.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/standard/nc-wmt11.grammar.gz b/training/dtrain/examples/standard/nc-wmt11.grammar.gz
deleted file mode 100644
index ce4024a1..00000000
--- a/training/dtrain/examples/standard/nc-wmt11.grammar.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/standard/nc-wmt11.gz b/training/dtrain/examples/standard/nc-wmt11.gz
deleted file mode 100644
index c39c5aef..00000000
--- a/training/dtrain/examples/standard/nc-wmt11.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/toy/cdec.ini b/training/dtrain/examples/toy/cdec.ini
deleted file mode 100644
index e6c19abe..00000000
--- a/training/dtrain/examples/toy/cdec.ini
+++ /dev/null
@@ -1,4 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
-grammar=grammar.gz
-#add_extra_pass_through_features=6
diff --git a/training/dtrain/examples/toy/dtrain.ini b/training/dtrain/examples/toy/dtrain.ini
deleted file mode 100644
index ef956df7..00000000
--- a/training/dtrain/examples/toy/dtrain.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-decoder_config=cdec.ini
-input=src
-refs=tgt
-output=-
-print_weights=logp shell_rule house_rule small_rule little_rule PassThrough PassThrough_1 PassThrough_2 PassThrough_3 PassThrough_4 PassThrough_5 PassThrough_6
-k=4
-N=4
-epochs=2
-scorer=bleu
-sample_from=kbest
-filter=uniq
-pair_sampling=all
-learning_rate=1
diff --git a/training/dtrain/examples/toy/expected-output b/training/dtrain/examples/toy/expected-output
deleted file mode 100644
index 1da2aadd..00000000
--- a/training/dtrain/examples/toy/expected-output
+++ /dev/null
@@ -1,77 +0,0 @@
-Warning: hi_lo only works with pair_sampling XYX.
- cdec cfg 'cdec.ini'
-Seeding random number sequence to 1664825829
-
-dtrain
-Parameters:
- k 4
- N 4
- T 2
- scorer 'bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 1
- gamma 0
- loss margin 0
- pairs 'all'
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
- input 'src'
- refs 'tgt'
- output '-'
-(a dot represents 10 inputs)
-Iteration #1 of 2.
- 2
-WEIGHTS
- logp = +0
- shell_rule = -1
- house_rule = +2
- small_rule = -2
- little_rule = +3
- PassThrough = -5
- ---
- 1best avg score: 0.5 (+0.5)
- 1best avg model score: 2.5 (+2.5)
- avg # pairs: 4
- avg # rank err: 1.5
- avg # margin viol: 0
- non0 feature count: 6
- avg list sz: 4
- avg f count: 2.875
-(time 0 min, 0 s/S)
-
-Iteration #2 of 2.
- 2
-WEIGHTS
- logp = +0
- shell_rule = -1
- house_rule = +2
- small_rule = -2
- little_rule = +3
- PassThrough = -5
- ---
- 1best avg score: 1 (+0.5)
- 1best avg model score: 5 (+2.5)
- avg # pairs: 5
- avg # rank err: 0
- avg # margin viol: 0
- non0 feature count: 6
- avg list sz: 4
- avg f count: 3
-(time 0 min, 0 s/S)
-
-Writing weights file to '-' ...
-house_rule 2
-little_rule 3
-Glue -4
-PassThrough -5
-small_rule -2
-shell_rule -1
-done
-
----
-Best iteration: 2 [SCORE 'bleu'=1].
-This took 0 min.
diff --git a/training/dtrain/examples/toy/grammar.gz b/training/dtrain/examples/toy/grammar.gz
deleted file mode 100644
index 8eb0d29e..00000000
--- a/training/dtrain/examples/toy/grammar.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/toy/src b/training/dtrain/examples/toy/src
deleted file mode 100644
index 87e39ef2..00000000
--- a/training/dtrain/examples/toy/src
+++ /dev/null
@@ -1,2 +0,0 @@
-ich sah ein kleines haus
-ich fand ein kleines haus
diff --git a/training/dtrain/examples/toy/tgt b/training/dtrain/examples/toy/tgt
deleted file mode 100644
index 174926b3..00000000
--- a/training/dtrain/examples/toy/tgt
+++ /dev/null
@@ -1,2 +0,0 @@
-i saw a little house
-i found a little house
diff --git a/training/dtrain/kbestget.h b/training/dtrain/kbestget.h
deleted file mode 100644
index 85252db3..00000000
--- a/training/dtrain/kbestget.h
+++ /dev/null
@@ -1,88 +0,0 @@
-#ifndef _DTRAIN_KBESTGET_H_
-#define _DTRAIN_KBESTGET_H_
-
-#include "kbest.h"
-
-namespace dtrain
-{
-
-
-struct KBestGetter : public HypSampler
-{
- const unsigned k_;
- const string filter_type_;
- vector<ScoredHyp> s_;
- unsigned src_len_;
-
- KBestGetter(const unsigned k, const string filter_type) :
- k_(k), filter_type_(filter_type) {}
-
- virtual void
- NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg)
- {
- src_len_ = smeta.GetSourceLength();
- KBestScored(*hg);
- }
-
- vector<ScoredHyp>* GetSamples() { return &s_; }
-
- void
- KBestScored(const Hypergraph& forest)
- {
- if (filter_type_ == "uniq") {
- KBestUnique(forest);
- } else if (filter_type_ == "not") {
- KBestNoFilter(forest);
- }
- }
-
- void
- KBestUnique(const Hypergraph& forest)
- {
- s_.clear(); sz_ = f_count_ = 0;
- KBest::KBestDerivations<vector<WordID>, ESentenceTraversal,
- KBest::FilterUnique, prob_t, EdgeProb> kbest(forest, k_);
- for (unsigned i = 0; i < k_; ++i) {
- const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal, KBest::FilterUnique,
- prob_t, EdgeProb>::Derivation* d =
- kbest.LazyKthBest(forest.nodes_.size() - 1, i);
- if (!d) break;
- ScoredHyp h;
- h.w = d->yield;
- h.f = d->feature_values;
- h.model = log(d->score);
- h.rank = i;
- h.score = scorer_->Score(h.w, *ref_, i, src_len_);
- s_.push_back(h);
- sz_++;
- f_count_ += h.f.size();
- }
- }
-
- void
- KBestNoFilter(const Hypergraph& forest)
- {
- s_.clear(); sz_ = f_count_ = 0;
- KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> kbest(forest, k_);
- for (unsigned i = 0; i < k_; ++i) {
- const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal>::Derivation* d =
- kbest.LazyKthBest(forest.nodes_.size() - 1, i);
- if (!d) break;
- ScoredHyp h;
- h.w = d->yield;
- h.f = d->feature_values;
- h.model = log(d->score);
- h.rank = i;
- h.score = scorer_->Score(h.w, *ref_, i, src_len_);
- s_.push_back(h);
- sz_++;
- f_count_ += h.f.size();
- }
- }
-};
-
-
-} // namespace
-
-#endif
-
diff --git a/training/dtrain/ksampler.h b/training/dtrain/ksampler.h
deleted file mode 100644
index 29dab667..00000000
--- a/training/dtrain/ksampler.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _DTRAIN_KSAMPLER_H_
-#define _DTRAIN_KSAMPLER_H_
-
-#include "hg_sampler.h"
-
-namespace dtrain
-{
-
-
-bool
-cmp_hyp_by_model_d(ScoredHyp a, ScoredHyp b)
-{
- return a.model > b.model;
-}
-
-struct KSampler : public HypSampler
-{
- const unsigned k_;
- vector<ScoredHyp> s_;
- MT19937* prng_;
- score_t (*scorer)(NgramCounts&, const unsigned, const unsigned, unsigned, vector<score_t>);
- unsigned src_len_;
-
- explicit KSampler(const unsigned k, MT19937* prng) :
- k_(k), prng_(prng) {}
-
- virtual void
- NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg)
- {
- src_len_ = smeta.GetSourceLength();
- ScoredSamples(*hg);
- }
-
- vector<ScoredHyp>* GetSamples() { return &s_; }
-
- void ScoredSamples(const Hypergraph& forest) {
- s_.clear(); sz_ = f_count_ = 0;
- std::vector<HypergraphSampler::Hypothesis> samples;
- HypergraphSampler::sample_hypotheses(forest, k_, prng_, &samples);
- for (unsigned i = 0; i < k_; ++i) {
- ScoredHyp h;
- h.w = samples[i].words;
- h.f = samples[i].fmap;
- h.model = log(samples[i].model_score);
- h.rank = i;
- h.score = scorer_->Score(h.w, *ref_, i, src_len_);
- s_.push_back(h);
- sz_++;
- f_count_ += h.f.size();
- }
- sort(s_.begin(), s_.end(), cmp_hyp_by_model_d);
- for (unsigned i = 0; i < s_.size(); i++) s_[i].rank = i;
- }
-};
-
-
-} // namespace
-
-#endif
-
diff --git a/training/dtrain/lplp.rb b/training/dtrain/lplp.rb
index 86e835e8..ac3fb758 100755
--- a/training/dtrain/lplp.rb
+++ b/training/dtrain/lplp.rb
@@ -1,4 +1,6 @@
-# lplp.rb
+#!/usr/bin/env ruby
+
+require 'zipf'
# norms
def l0(feature_column, n)
@@ -19,7 +21,8 @@ end
# stats
def median(feature_column, n)
- return feature_column.concat(0.step(n-feature_column.size-1).map{|i|0}).sort[feature_column.size/2]
+ return feature_column.concat(0.step(n-feature_column.size-1).map{|i|0})
+ .sort[feature_column.size/2]
end
def mean(feature_column, n)
@@ -28,7 +31,7 @@ end
# selection
def select_k(weights, norm_fun, n, k=10000)
- weights.sort{|a,b| norm_fun.call(b[1], n) <=> norm_fun.call(a[1], n)}.each { |p|
+ weights.sort{|a,b| norm_fun.call(b[1], n)<=>norm_fun.call(a[1], n)}.each { |p|
puts "#{p[0]}\t#{mean(p[1], n)}"
k -= 1
if k == 0 then break end
@@ -84,19 +87,24 @@ def _test()
end
#_test()
-
def usage()
- puts "lplp.rb <l0,l1,l2,linfty,mean,median> <cut|select_k> <k|threshold> <#shards> < <input>"
+ puts "lplp.rb <l0,l1,l2,linfty,mean,median,/path/to/file> <cut|select_k|feature_names> <k|threshold|--> <#shards> < <input>"
puts " l0...: norms for selection"
puts "select_k: only output top k (according to the norm of their column vector) features"
puts " cut: output features with weight >= threshold"
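+  puts "feature_names: only output features whose names are listed in the file given as first argument"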
- puts " n: if we do not have a shard count use this number for averaging"
+ puts " n: number of shards for averaging"
exit 1
end
-if ARGV.size < 4 then usage end
-norm_fun = method(ARGV[0].to_sym)
+usage if ARGV.size<4
+norm_fun = nil
+feature_names = nil
type = ARGV[1]
+if type == 'feature_names'
+ feature_names = ARGV[0]
+else
+ norm_fun = method(ARGV[0].to_sym)
+end
x = ARGV[2].to_f
shard_count = ARGV[3].to_f
@@ -117,6 +125,17 @@ if type == 'cut'
cut(w, norm_fun, shard_count, x)
elsif type == 'select_k'
select_k(w, norm_fun, shard_count, x)
+elsif type == 'feature_names'
+  a = ReadFile.readlines_strip feature_names
+ h = {}
+ a.each { |i|
+ h[i] = true
+ }
+ w.each_pair { |k,v|
+ if h[k]
+ puts "#{k}\t#{mean(v, shard_count)}"
+ end
+ }
else
puts "oh oh"
end
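
lplp.rb treats each feature as a column vector of its per-shard weights: one of the norms (l0, l1, l2, linfty, mean, median) ranks the columns, select_k keeps the top k, cut keeps those whose norm clears a threshold, and what gets printed is always the per-shard mean. A minimal Ruby sketch of the select_k path, with invented feature columns and an inline l2 norm standing in for the script's norm functions:

    # toy feature columns: feature name => weight per shard (values invented)
    weights = {
      'Glue'          => [-0.057, -0.293],
      'PhraseModel_2' => [ 0.557,  0.836],
      'PassThrough'   => [-0.184, -0.250],
    }
    n = 2  # number of shards, used for averaging

    l2 = lambda { |col, _n| Math.sqrt(col.inject(0.0) { |s, w| s + w*w }) }

    k = 2
    weights.sort { |a, b| l2.call(b[1], n) <=> l2.call(a[1], n) }.first(k).each { |name, col|
      puts "#{name}\t#{col.inject(:+) / n}"  # per-shard mean, as in mean()
    }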
diff --git a/training/dtrain/pairsampling.h b/training/dtrain/pairsampling.h
deleted file mode 100644
index 1a3c498c..00000000
--- a/training/dtrain/pairsampling.h
+++ /dev/null
@@ -1,141 +0,0 @@
-#ifndef _DTRAIN_PAIRSAMPLING_H_
-#define _DTRAIN_PAIRSAMPLING_H_
-
-namespace dtrain
-{
-
-
-bool
-accept_pair(score_t a, score_t b, score_t threshold)
-{
- if (fabs(a - b) < threshold) return false;
- return true;
-}
-
-bool
-cmp_hyp_by_score_d(ScoredHyp a, ScoredHyp b)
-{
- return a.score > b.score;
-}
-
-inline void
-all_pairs(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, bool misranked_only, float _unused=1)
-{
- sort(s->begin(), s->end(), cmp_hyp_by_score_d);
- unsigned sz = s->size();
- bool b = false;
- unsigned count = 0;
- for (unsigned i = 0; i < sz-1; i++) {
- for (unsigned j = i+1; j < sz; j++) {
- if (misranked_only && !((*s)[i].model <= (*s)[j].model)) continue;
- if (threshold > 0) {
- if (accept_pair((*s)[i].score, (*s)[j].score, threshold))
- training.push_back(make_pair((*s)[i], (*s)[j]));
- } else {
- if ((*s)[i].score != (*s)[j].score)
- training.push_back(make_pair((*s)[i], (*s)[j]));
- }
- if (++count == max) {
- b = true;
- break;
- }
- }
- if (b) break;
- }
-}
-
-/*
- * multipartite ranking
- * sort (descending) by bleu
- * compare top X to middle Y and low X
- * cmp middle Y to low X
- */
-
-inline void
-partXYX(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, bool misranked_only, float hi_lo)
-{
- unsigned sz = s->size();
- if (sz < 2) return;
- sort(s->begin(), s->end(), cmp_hyp_by_score_d);
- unsigned sep = round(sz*hi_lo);
- unsigned sep_hi = sep;
- if (sz > 4) while (sep_hi < sz && (*s)[sep_hi-1].score == (*s)[sep_hi].score) ++sep_hi;
- else sep_hi = 1;
- bool b = false;
- unsigned count = 0;
- for (unsigned i = 0; i < sep_hi; i++) {
- for (unsigned j = sep_hi; j < sz; j++) {
- if (misranked_only && !((*s)[i].model <= (*s)[j].model)) continue;
- if (threshold > 0) {
- if (accept_pair((*s)[i].score, (*s)[j].score, threshold))
- training.push_back(make_pair((*s)[i], (*s)[j]));
- } else {
- if ((*s)[i].score != (*s)[j].score)
- training.push_back(make_pair((*s)[i], (*s)[j]));
- }
- if (++count == max) {
- b = true;
- break;
- }
- }
- if (b) break;
- }
- unsigned sep_lo = sz-sep;
- while (sep_lo > 0 && (*s)[sep_lo-1].score == (*s)[sep_lo].score) --sep_lo;
- for (unsigned i = sep_hi; i < sz-sep_lo; i++) {
- for (unsigned j = sz-sep_lo; j < sz; j++) {
- if (misranked_only && !((*s)[i].model <= (*s)[j].model)) continue;
- if (threshold > 0) {
- if (accept_pair((*s)[i].score, (*s)[j].score, threshold))
- training.push_back(make_pair((*s)[i], (*s)[j]));
- } else {
- if ((*s)[i].score != (*s)[j].score)
- training.push_back(make_pair((*s)[i], (*s)[j]));
- }
- if (++count == max) return;
- }
- }
-}
-
-/*
- * pair sampling as in
- * 'Tuning as Ranking' (Hopkins & May, 2011)
- * count = 5000
- * threshold = 5% BLEU (0.05 for param 3)
- * cut = top 50
- */
-bool
-_PRO_cmp_pair_by_diff_d(pair<ScoredHyp,ScoredHyp> a, pair<ScoredHyp,ScoredHyp> b)
-{
- return (fabs(a.first.score - a.second.score)) > (fabs(b.first.score - b.second.score));
-}
-inline void
-PROsampling(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, bool _unused=false, float _also_unused=0)
-{
- sort(s->begin(), s->end(), cmp_hyp_by_score_d);
- unsigned max_count = 5000, count = 0, sz = s->size();
- bool b = false;
- for (unsigned i = 0; i < sz-1; i++) {
- for (unsigned j = i+1; j < sz; j++) {
- if (accept_pair((*s)[i].score, (*s)[j].score, threshold)) {
- training.push_back(make_pair((*s)[i], (*s)[j]));
- if (++count == max_count) {
- b = true;
- break;
- }
- }
- }
- if (b) break;
- }
- if (training.size() > 50) {
- sort(training.begin(), training.end(), _PRO_cmp_pair_by_diff_d);
- training.erase(training.begin()+50, training.end());
- }
- return;
-}
-
-
-} // namespace
-
-#endif
-
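
The XYX sampler deleted above builds multipartite ranking pairs: hypotheses are sorted descending by gold score, the top hi_lo fraction is paired against everything below it, and the middle band is paired against the bottom hi_lo fraction; with misranked_only set, a pair survives only if the model scores it in the wrong order. A toy sketch of that selection in Ruby (simplified: no tie handling at the band boundaries, no score threshold, no pair cap):

    # each hypothesis is [gold_score, model_score]; values are invented
    hyps = [[0.9, 1.2], [0.7, 2.0], [0.5, 0.3], [0.2, 0.9], [0.1, -0.5]]
    hi_lo = 0.2

    s = hyps.sort_by { |gold, _| -gold }   # descending by gold score
    sep = (s.size * hi_lo).round           # width of the top and bottom bands
    pairs = []
    (0...sep).each { |i|                   # top band vs. everything below it
      (sep...s.size).each { |j| pairs << [s[i], s[j]] if s[i][0] != s[j][0] }
    }
    (sep...s.size - sep).each { |i|        # middle band vs. bottom band
      (s.size - sep...s.size).each { |j| pairs << [s[i], s[j]] if s[i][0] != s[j][0] }
    }
    puts "#{pairs.size} training pairs"    # => 7 training pairs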
diff --git a/training/dtrain/parallelize.rb b/training/dtrain/parallelize.rb
index 82600009..3159a888 100755
--- a/training/dtrain/parallelize.rb
+++ b/training/dtrain/parallelize.rb
@@ -1,170 +1,181 @@
#!/usr/bin/env ruby
require 'trollop'
+require 'zipf'
-def usage
- STDERR.write "Usage: "
- STDERR.write "ruby parallelize.rb -c <dtrain.ini> [-e <epochs=10>] [--randomize/-z] [--reshard/-y] -s <#shards|0> [-p <at once=9999>] -i <input> -r <refs> [--qsub/-q] [--dtrain_binary <path to dtrain binary>] [-l \"l2 select_k 100000\"] [--extra_qsub \"-l virtual_free=24G\"]\n"
- exit 1
+conf = Trollop::options do
+ opt :conf, "dtrain configuration", :type => :string, :short => '-c'
+ opt :input, "input as bitext (f ||| e)", :type => :string, :short => '-i'
+ opt :epochs, "number of epochs", :type => :int, :default => 10, :short => '-e'
+ opt :randomize, "randomize shards once", :type => :bool, :default => false, :short => '-z'
+ opt :reshard, "randomize after each epoch", :type => :bool, :default => false, :short => '-y'
+ opt :shards, "number of shards", :type => :int, :short => '-s'
+ opt :weights, "input weights for first epoch", :type => :string, :default => '', :short => '-w'
+ opt :lplp_args, "arguments for lplp.rb", :type => :string, :default => "l2 select_k 100000", :short => '-l'
+ opt :per_shard_decoder_configs, "give custom decoder config per shard", :type => :string, :short => '-o'
+  opt :processes_at_once, "jobs to run at once", :type => :int, :default => 9999, :short => '-p'
+ opt :qsub, "use qsub", :type => :bool, :default => false, :short => '-q'
+ opt :qsub_args, "extra args for qsub", :type => :string, :default => "h_vmem=5G", :short => '-r'
+ opt :dtrain_binary, "path to dtrain binary", :type => :string, :short => '-d'
+ opt :adadelta, "use adadelta", :type => :bool, :default => false, :short => '-D'
end
-opts = Trollop::options do
- opt :config, "dtrain config file", :type => :string
- opt :epochs, "number of epochs", :type => :int, :default => 10
- opt :lplp_args, "arguments for lplp.rb", :type => :string, :default => "l2 select_k 100000"
- opt :randomize, "randomize shards before each epoch", :type => :bool, :short => '-z', :default => false
- opt :reshard, "reshard after each epoch", :type => :bool, :short => '-y', :default => false
- opt :shards, "number of shards", :type => :int
- opt :processes_at_once, "have this number (max) running at the same time", :type => :int, :default => 9999
- opt :input, "input", :type => :string
- opt :references, "references", :type => :string
- opt :qsub, "use qsub", :type => :bool, :default => false
- opt :dtrain_binary, "path to dtrain binary", :type => :string
- opt :extra_qsub, "extra qsub args", :type => :string, :default => ""
- opt :per_shard_decoder_configs, "give special decoder config per shard", :type => :string, :short => '-o'
- opt :first_input_weights, "input weights for first iter", :type => :string, :default => '', :short => '-w'
-end
-usage if not opts[:config]&&opts[:shards]&&opts[:input]&&opts[:references]
-
dtrain_dir = File.expand_path File.dirname(__FILE__)
-if not opts[:dtrain_binary]
+if not conf[:dtrain_binary]
dtrain_bin = "#{dtrain_dir}/dtrain"
else
- dtrain_bin = opts[:dtrain_binary]
+ dtrain_bin = conf[:dtrain_binary]
end
-ruby = '/usr/bin/ruby'
lplp_rb = "#{dtrain_dir}/lplp.rb"
-lplp_args = opts[:lplp_args]
-cat = '/bin/cat'
+lplp_args = conf[:lplp_args]
-ini = opts[:config]
-epochs = opts[:epochs]
-rand = opts[:randomize]
-reshard = opts[:reshard]
-predefined_shards = false
+dtrain_conf = conf[:conf]
+epochs = conf[:epochs]
+rand = conf[:randomize]
+reshard = conf[:reshard]
+predefined_shards = false
per_shard_decoder_configs = false
-if opts[:shards] == 0
+if conf[:shards] == 0
predefined_shards = true
num_shards = 0
- per_shard_decoder_configs = true if opts[:per_shard_decoder_configs]
+ per_shard_decoder_configs = true if conf[:per_shard_decoder_configs]
else
- num_shards = opts[:shards]
+ num_shards = conf[:shards]
end
-input = opts[:input]
-refs = opts[:references]
-use_qsub = opts[:qsub]
-shards_at_once = opts[:processes_at_once]
-first_input_weights = opts[:first_input_weights]
-opts[:extra_qsub] = "-l #{opts[:extra_qsub]}" if opts[:extra_qsub]!=""
+input = conf[:input]
+use_qsub = conf[:qsub]
+shards_at_once = conf[:processes_at_once]
+first_input_weights = conf[:weights]
+use_adadelta = conf[:adadelta]
`mkdir work`
-def make_shards(input, refs, num_shards, epoch, rand)
+def make_shards input, num_shards, epoch, rand
lc = `wc -l #{input}`.split.first.to_i
index = (0..lc-1).to_a
index.reverse!
index.shuffle! if rand
shard_sz = (lc / num_shards.to_f).round 0
leftover = lc - (num_shards*shard_sz)
- leftover = 0 if leftover < 0
+ leftover = [0, leftover].max
in_f = File.new input, 'r'
in_lines = in_f.readlines
- refs_f = File.new refs, 'r'
- refs_lines = refs_f.readlines
shard_in_files = []
- shard_refs_files = []
in_fns = []
- refs_fns = []
- new_num_shards = 0
+ real_num_shards = 0
0.upto(num_shards-1) { |shard|
break if index.size==0
- new_num_shards += 1
- in_fn = "work/shard.#{shard}.#{epoch}.in"
- shard_in = File.new in_fn, 'w+'
+ real_num_shards += 1
+ in_fn = "work/shard.#{shard}.#{epoch}.gz"
+ shard_in = WriteFile.new in_fn
in_fns << in_fn
- refs_fn = "work/shard.#{shard}.#{epoch}.refs"
- shard_refs = File.new refs_fn, 'w+'
- refs_fns << refs_fn
0.upto(shard_sz-1) { |i|
j = index.pop
+ break if !j
shard_in.write in_lines[j]
- shard_refs.write refs_lines[j]
}
shard_in_files << shard_in
- shard_refs_files << shard_refs
}
while leftover > 0
j = index.pop
+ break if !j
shard_in_files[-1].write in_lines[j]
- shard_refs_files[-1].write refs_lines[j]
leftover -= 1
end
- (shard_in_files + shard_refs_files).each do |f| f.close end
+ shard_in_files.each do |f| f.close end
in_f.close
- refs_f.close
- return in_fns, refs_fns, new_num_shards
+ return in_fns, real_num_shards
end
input_files = []
-refs_files = []
if predefined_shards
- input_files = File.new(input).readlines.map {|i| i.strip }
- refs_files = File.new(refs).readlines.map {|i| i.strip }
+ input_files = File.new(input).readlines.map { |i| i.strip }
if per_shard_decoder_configs
- decoder_configs = File.new(opts[:per_shard_decoder_configs]).readlines.map {|i| i.strip}
+    decoder_configs = ReadFile.readlines_strip(conf[:per_shard_decoder_configs])
end
num_shards = input_files.size
else
- input_files, refs_files, num_shards = make_shards input, refs, num_shards, 0, rand
+ input_files, num_shards = make_shards input, num_shards, 0, rand
end
0.upto(epochs-1) { |epoch|
puts "epoch #{epoch+1}"
pids = []
input_weights = ''
- if epoch > 0 then input_weights = "--input_weights work/weights.#{epoch-1}" end
- weights_files = []
+ input_weights = "--input_weights work/weights.#{epoch-1}.gz" if epoch>0
shard = 0
remaining_shards = num_shards
while remaining_shards > 0
shards_at_once.times {
break if remaining_shards==0
- qsub_str_start = qsub_str_end = ''
- local_end = ''
+ qsub_str_start = qsub_str_end = local_end = ''
if use_qsub
- qsub_str_start = "qsub #{opts[:extra_qsub]} -cwd -sync y -b y -j y -o work/out.#{shard}.#{epoch} -N dtrain.#{shard}.#{epoch} \""
+ qsub_str_start = "qsub -l #{conf[:qsub_args]} -cwd -sync y -b y -j y\
+ -o work/out.#{shard}.#{epoch}\
+ -N dtrain.#{shard}.#{epoch} \""
qsub_str_end = "\""
local_end = ''
else
local_end = "2>work/out.#{shard}.#{epoch}"
end
if per_shard_decoder_configs
- cdec_cfg = "--decoder_config #{decoder_configs[shard]}"
+ cdec_conf = "--decoder_conf #{decoder_configs[shard]}"
else
- cdec_cfg = ""
+ cdec_conf = ""
end
- if first_input_weights!='' && epoch == 0
+ adadelta_input = ""
+ adadelta_output = ""
+ if use_adadelta
+ adadelta_output = "--adadelta_output work/adadelta.#{shard}.#{epoch}"
+ if epoch > 0
+ adadelta_input = "--adadelta_input work/adadelta.#{epoch-1}"
+ end
+ end
+ if first_input_weights != '' && epoch == 0
input_weights = "--input_weights #{first_input_weights}"
end
pids << Kernel.fork {
- `#{qsub_str_start}#{dtrain_bin} -c #{ini} #{cdec_cfg} #{input_weights}\
- --input #{input_files[shard]}\
- --refs #{refs_files[shard]}\
- --output work/weights.#{shard}.#{epoch}#{qsub_str_end} #{local_end}`
+ `#{qsub_str_start}#{dtrain_bin} -c #{dtrain_conf} #{cdec_conf}\
+ #{input_weights}\
+ #{adadelta_output} #{adadelta_input}\
+ --bitext #{input_files[shard]}\
+ --output work/weights.#{shard}.#{epoch}.gz#{qsub_str_end} #{local_end}`
}
- weights_files << "work/weights.#{shard}.#{epoch}"
shard += 1
remaining_shards -= 1
}
pids.each { |pid| Process.wait(pid) }
pids.clear
end
- `#{cat} work/weights.*.#{epoch} > work/weights_cat`
- `#{ruby} #{lplp_rb} #{lplp_args} #{num_shards} < work/weights_cat > work/weights.#{epoch}`
+ `zcat work/weights.*.#{epoch}.gz \
+ | ruby #{lplp_rb} #{lplp_args} #{num_shards} \
+ | gzip -c \
+ > work/weights.#{epoch}.gz`
+ if use_adadelta
+ h = {}
+    ReadFile.readlines_strip("work/weights.#{epoch}.gz").each { |line|
+ h[line.split.first] = true
+ }
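+    # 0.size is the machine word size in bytes, so this is the largest native Fixnum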
+ max = (2**(0.size * 8 -2) -1)
+ ["gradient", "update"].each { |i|
+ `zcat work/adadelta.*.#{epoch}.#{i}.gz \
+ | ruby #{lplp_rb} l0 select_k #{max} #{num_shards} \
+ | gzip -c \
+ > work/adadelta_avg.#{i}.gz`
+ o = WriteFile.new "work/adadelta.#{epoch}.#{i}.gz"
+ ReadFile.readlines_strip("work/adadelta_avg.#{i}.gz").each { |line|
+ k,v = line.split
+ if h.has_key? k
+ o.write "#{k} #{v}\n"
+ end
+ }
+ `rm work/adadelta_avg.#{i}.gz`
+ o.close
+ }
+ end
if rand and reshard and epoch+1!=epochs
- input_files, refs_files, num_shards = make_shards input, refs, num_shards, epoch+1, rand
+ input_files, num_shards = make_shards input, num_shards, epoch+1, rand
end
}
-`rm work/weights_cat`
-
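
One subtlety in the rewritten make_shards: the shard size is lc/num_shards rounded to the nearest integer, so the planned shards can over-allocate the corpus, which is why the leftover is clamped at zero and the fill loops now break as soon as index.pop returns nil. A toy Ruby walk-through of that arithmetic (names invented):

    lc, num_shards = 10, 4
    shard_sz = (lc / num_shards.to_f).round          # (10/4.0).round => 3
    leftover = [0, lc - num_shards * shard_sz].max   # 10 - 12, clamped to 0
    index = (0...lc).to_a
    shards = Array.new(num_shards) {
      Array.new(shard_sz) { index.pop }.compact      # pop yields nil once empty
    }.reject(&:empty?)
    p shards.map(&:size)                             # => [3, 3, 3, 1]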
diff --git a/training/dtrain/sample.h b/training/dtrain/sample.h
new file mode 100644
index 00000000..860904fd
--- /dev/null
+++ b/training/dtrain/sample.h
@@ -0,0 +1,131 @@
+#ifndef _DTRAIN_SAMPLE_H_
+#define _DTRAIN_SAMPLE_H_
+
+#include "kbest.h"
+#include "hg_sampler.h"
+
+#include "score.h"
+
+namespace dtrain
+{
+
+struct HypSampler : public DecoderObserver
+{
+ size_t feature_count, effective_size;
+ vector<Hyp> sample;
+ vector<Ngrams>* reference_ngrams;
+ vector<size_t>* reference_lengths;
+
+ void
+ reset()
+ {
+ sample.clear();
+ effective_size = feature_count = 0;
+ }
+};
+
+struct KBestSampler : public HypSampler
+{
+ size_t k;
+ bool unique;
+ Scorer* scorer;
+
+ KBestSampler() {}
+ KBestSampler(const size_t k, Scorer* scorer) :
+ k(k), scorer(scorer) {}
+
+ virtual void
+ NotifyTranslationForest(const SentenceMetadata& /*smeta*/, Hypergraph* hg)
+ {
+ reset();
+ KBest::KBestDerivations<vector<WordID>, ESentenceTraversal,
+ KBest::FilterUnique, prob_t, EdgeProb> kbest(*hg, k);
+ for (size_t i=0; i<k; ++i) {
+ KBest::KBestDerivations<vector<WordID>, ESentenceTraversal,
+ KBest::FilterUnique, prob_t, EdgeProb>::Derivation* d =
+ kbest.LazyKthBest(hg->nodes_.size() - 1, i);
+ if (!d) break;
+ sample.emplace_back(
+ d->yield,
+ d->feature_values,
+ log(d->score),
+ scorer->score(d->yield, *reference_ngrams, *reference_lengths),
+ i
+ );
+ effective_size++;
+ feature_count += sample.back().f.size();
+ }
+ }
+};
+
+struct KBestNoFilterSampler : public KBestSampler
+{
+ size_t k;
+ bool unique;
+ Scorer* scorer;
+
+ KBestNoFilterSampler(const size_t k, Scorer* scorer) :
+ k(k), scorer(scorer) {}
+
+ virtual void
+ NotifyTranslationForest(const SentenceMetadata& /*smeta*/, Hypergraph* hg)
+ {
+ reset();
+ KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> kbest(*hg, k);
+ for (size_t i=0; i<k; ++i) {
+ const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal>::Derivation* d =
+ kbest.LazyKthBest(hg->nodes_.size() - 1, i);
+ if (!d) break;
+ sample.emplace_back(
+ d->yield,
+ d->feature_values,
+ log(d->score),
+ scorer->score(d->yield, *reference_ngrams, *reference_lengths),
+ i
+ );
+ effective_size++;
+ feature_count += sample.back().f.size();
+ }
+ }
+};
+
+struct KSampler : public HypSampler
+{
+ const size_t k;
+ Scorer* scorer;
+ MT19937 rng;
+
+ explicit KSampler(const unsigned k, Scorer* scorer) :
+ k(k), scorer(scorer) {}
+
+ virtual void
+ NotifyTranslationForest(const SentenceMetadata& /*smeta*/, Hypergraph* hg)
+ {
+ reset();
+ std::vector<HypergraphSampler::Hypothesis> hs;
+ HypergraphSampler::sample_hypotheses(*hg, k, &rng, &hs);
+ for (size_t i=0; i<k; ++i) {
+ sample.emplace_back(
+ hs[i].words,
+ hs[i].fmap,
+ log(hs[i].model_score),
+ 0,
+ 0
+ );
+ effective_size++;
+ feature_count += sample.back().f.size();
+ }
+ sort(sample.begin(), sample.end(), [](Hyp first, Hyp second) {
+ return first.model > second.model;
+ });
+ for (unsigned i=0; i<sample.size(); i++) {
+ sample[i].rank=i;
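+      // rescore after the model-score sort; as written, score()'s return value is not stored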
+ scorer->score(sample[i].w, *reference_ngrams, *reference_lengths);
+ }
+ }
+};
+
+} // namespace
+
+#endif
+
diff --git a/training/dtrain/score.cc b/training/dtrain/score.cc
deleted file mode 100644
index 127f34d2..00000000
--- a/training/dtrain/score.cc
+++ /dev/null
@@ -1,283 +0,0 @@
-#include "score.h"
-
-namespace dtrain
-{
-
-
-/*
- * bleu
- *
- * as in "BLEU: a Method for Automatic Evaluation
- * of Machine Translation"
- * (Papineni et al. '02)
- *
- * NOTE: 0 if for one n \in {1..N} count is 0
- */
-score_t
-BleuScorer::Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len)
-{
- if (hyp_len == 0 || ref_len == 0) return 0.;
- unsigned M = N_;
- vector<score_t> v = w_;
- if (ref_len < N_) {
- M = ref_len;
- for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
- }
- score_t sum = 0;
- for (unsigned i = 0; i < M; i++) {
- if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) return 0.;
- sum += v[i] * log((score_t)counts.clipped_[i]/counts.sum_[i]);
- }
- return brevity_penalty(hyp_len, ref_len) * exp(sum);
-}
-
-score_t
-BleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
- const unsigned /*rank*/, const unsigned /*src_len*/)
-{
- unsigned hyp_len = hyp.size(), ref_len = ref.size();
- if (hyp_len == 0 || ref_len == 0) return 0.;
- NgramCounts counts = make_ngram_counts(hyp, ref, N_);
- return Bleu(counts, hyp_len, ref_len);
-}
-
-/*
- * 'stupid' bleu
- *
- * as in "ORANGE: a Method for Evaluating
- * Automatic Evaluation Metrics
- * for Machine Translation"
- * (Lin & Och '04)
- *
- * NOTE: 0 iff no 1gram match ('grounded')
- */
-score_t
-StupidBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
- const unsigned /*rank*/, const unsigned /*src_len*/)
-{
- unsigned hyp_len = hyp.size(), ref_len = ref.size();
- if (hyp_len == 0 || ref_len == 0) return 0.;
- NgramCounts counts = make_ngram_counts(hyp, ref, N_);
- unsigned M = N_;
- vector<score_t> v = w_;
- if (ref_len < N_) {
- M = ref_len;
- for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
- }
- score_t sum = 0, add = 0;
- for (unsigned i = 0; i < M; i++) {
- if (i == 0 && (counts.sum_[i] == 0 || counts.clipped_[i] == 0)) return 0.;
- if (i == 1) add = 1;
- sum += v[i] * log(((score_t)counts.clipped_[i] + add)/((counts.sum_[i] + add)));
- }
- return brevity_penalty(hyp_len, ref_len) * exp(sum);
-}
-
-/*
- * fixed 'stupid' bleu
- *
- * as in "Optimizing for Sentence-Level BLEU+1
- * Yields Short Translations"
- * (Nakov et al. '12)
- */
-score_t
-FixedStupidBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
- const unsigned /*rank*/, const unsigned /*src_len*/)
-{
- unsigned hyp_len = hyp.size(), ref_len = ref.size();
- if (hyp_len == 0 || ref_len == 0) return 0.;
- NgramCounts counts = make_ngram_counts(hyp, ref, N_);
- unsigned M = N_;
- vector<score_t> v = w_;
- if (ref_len < N_) {
- M = ref_len;
- for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
- }
- score_t sum = 0, add = 0;
- for (unsigned i = 0; i < M; i++) {
- if (i == 0 && (counts.sum_[i] == 0 || counts.clipped_[i] == 0)) return 0.;
- if (i == 1) add = 1;
- sum += v[i] * log(((score_t)counts.clipped_[i] + add)/((counts.sum_[i] + add)));
- }
- return brevity_penalty(hyp_len, ref_len+1) * exp(sum); // <- fix
-}
-
-/*
- * smooth bleu
- *
- * as in "An End-to-End Discriminative Approach
- * to Machine Translation"
- * (Liang et al. '06)
- *
- * NOTE: max is 0.9375 (with N=4)
- */
-score_t
-SmoothBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
- const unsigned /*rank*/, const unsigned /*src_len*/)
-{
- unsigned hyp_len = hyp.size(), ref_len = ref.size();
- if (hyp_len == 0 || ref_len == 0) return 0.;
- NgramCounts counts = make_ngram_counts(hyp, ref, N_);
- unsigned M = N_;
- if (ref_len < N_) M = ref_len;
- score_t sum = 0.;
- vector<score_t> i_bleu;
- for (unsigned i = 0; i < M; i++) i_bleu.push_back(0.);
- for (unsigned i = 0; i < M; i++) {
- if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) {
- break;
- } else {
- score_t i_ng = log((score_t)counts.clipped_[i]/counts.sum_[i]);
- for (unsigned j = i; j < M; j++) {
- i_bleu[j] += (1/((score_t)j+1)) * i_ng;
- }
- }
- sum += exp(i_bleu[i])/pow(2.0, (double)(N_-i));
- }
- return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * 'sum' bleu
- *
- * sum up Ngram precisions
- */
-score_t
-SumBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
- const unsigned /*rank*/, const unsigned /*src_len*/)
-{
- unsigned hyp_len = hyp.size(), ref_len = ref.size();
- if (hyp_len == 0 || ref_len == 0) return 0.;
- NgramCounts counts = make_ngram_counts(hyp, ref, N_);
- unsigned M = N_;
- if (ref_len < N_) M = ref_len;
- score_t sum = 0.;
- unsigned j = 1;
- for (unsigned i = 0; i < M; i++) {
- if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break;
- sum += ((score_t)counts.clipped_[i]/counts.sum_[i])/pow(2.0, (double) (N_-j+1));
- j++;
- }
- return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * 'sum' (exp) bleu
- *
- * sum up exp(Ngram precisions)
- */
-score_t
-SumExpBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
- const unsigned /*rank*/, const unsigned /*src_len*/)
-{
- unsigned hyp_len = hyp.size(), ref_len = ref.size();
- if (hyp_len == 0 || ref_len == 0) return 0.;
- NgramCounts counts = make_ngram_counts(hyp, ref, N_);
- unsigned M = N_;
- if (ref_len < N_) M = ref_len;
- score_t sum = 0.;
- unsigned j = 1;
- for (unsigned i = 0; i < M; i++) {
- if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break;
- sum += exp(((score_t)counts.clipped_[i]/counts.sum_[i]))/pow(2.0, (double) (N_-j+1));
- j++;
- }
- return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * 'sum' (whatever) bleu
- *
- * sum up exp(weight * log(Ngram precisions))
- */
-score_t
-SumWhateverBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
- const unsigned /*rank*/, const unsigned /*src_len*/)
-{
- unsigned hyp_len = hyp.size(), ref_len = ref.size();
- if (hyp_len == 0 || ref_len == 0) return 0.;
- NgramCounts counts = make_ngram_counts(hyp, ref, N_);
- unsigned M = N_;
- vector<score_t> v = w_;
- if (ref_len < N_) {
- M = ref_len;
- for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
- }
- score_t sum = 0.;
- unsigned j = 1;
- for (unsigned i = 0; i < M; i++) {
- if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break;
- sum += exp(v[i] * log(((score_t)counts.clipped_[i]/counts.sum_[i])))/pow(2.0, (double) (N_-j+1));
- j++;
- }
- return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * approx. bleu
- *
- * as in "Online Large-Margin Training of Syntactic
- * and Structural Translation Features"
- * (Chiang et al. '08)
- *
- * NOTE: Needs some more code in dtrain.cc .
- * No scaling by src len.
- */
-score_t
-ApproxBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
- const unsigned rank, const unsigned src_len)
-{
- unsigned hyp_len = hyp.size(), ref_len = ref.size();
- if (ref_len == 0) return 0.;
- score_t score = 0.;
- NgramCounts counts(N_);
- if (hyp_len > 0) {
- counts = make_ngram_counts(hyp, ref, N_);
- NgramCounts tmp = glob_onebest_counts_ + counts;
- score = Bleu(tmp, hyp_len, ref_len);
- }
- if (rank == 0) { // 'context of 1best translations'
- glob_onebest_counts_ += counts;
- glob_onebest_counts_ *= discount_;
- glob_hyp_len_ = discount_ * (glob_hyp_len_ + hyp_len);
- glob_ref_len_ = discount_ * (glob_ref_len_ + ref_len);
- glob_src_len_ = discount_ * (glob_src_len_ + src_len);
- }
- return score;
-}
-
-/*
- * Linear (Corpus) Bleu
- *
- * as in "Lattice Minimum Bayes-Risk Decoding
- * for Statistical Machine Translation"
- * (Tromble et al. '08)
- *
- */
-score_t
-LinearBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
- const unsigned rank, const unsigned /*src_len*/)
-{
- unsigned hyp_len = hyp.size(), ref_len = ref.size();
- if (ref_len == 0) return 0.;
- unsigned M = N_;
- if (ref_len < N_) M = ref_len;
- NgramCounts counts(M);
- if (hyp_len > 0)
- counts = make_ngram_counts(hyp, ref, M);
- score_t ret = 0.;
- for (unsigned i = 0; i < M; i++) {
- if (counts.sum_[i] == 0 || onebest_counts_.sum_[i] == 0) break;
- ret += counts.sum_[i]/onebest_counts_.sum_[i];
- }
- ret = -(hyp_len/(score_t)onebest_len_) + (1./M) * ret;
- if (rank == 0) {
- onebest_len_ += hyp_len;
- onebest_counts_ += counts;
- }
- return ret;
-}
-
-
-} // namespace
-
diff --git a/training/dtrain/score.h b/training/dtrain/score.h
index 1cdd3fa9..748b3317 100644
--- a/training/dtrain/score.h
+++ b/training/dtrain/score.h
@@ -6,215 +6,480 @@
namespace dtrain
{
-
struct NgramCounts
{
- unsigned N_;
- map<unsigned, score_t> clipped_;
- map<unsigned, score_t> sum_;
+ size_t N_;
+ map<size_t, weight_t> clipped;
+ map<size_t, weight_t> sum;
+
+ NgramCounts() : N_(0) {} // avoid uninitialized N_
- NgramCounts(const unsigned N) : N_(N) { Zero(); }
+ NgramCounts(const size_t N) : N_(N) { zero(); }
inline void
operator+=(const NgramCounts& rhs)
{
- if (rhs.N_ > N_) Resize(rhs.N_);
- for (unsigned i = 0; i < N_; i++) {
- this->clipped_[i] += rhs.clipped_.find(i)->second;
- this->sum_[i] += rhs.sum_.find(i)->second;
+ if (rhs.N_ > N_) resize(rhs.N_);
+ for (size_t i = 0; i < N_; i++) {
+ this->clipped[i] += rhs.clipped.find(i)->second;
+ this->sum[i] += rhs.sum.find(i)->second;
}
}
- inline const NgramCounts
- operator+(const NgramCounts &other) const
- {
- NgramCounts result = *this;
- result += other;
- return result;
- }
-
inline void
- operator*=(const score_t rhs)
+ operator*=(const weight_t rhs)
{
- for (unsigned i = 0; i < N_; i++) {
- this->clipped_[i] *= rhs;
- this->sum_[i] *= rhs;
+ for (size_t i=0; i<N_; i++) {
+ this->clipped[i] *= rhs;
+ this->sum[i] *= rhs;
}
}
inline void
- Add(const unsigned count, const unsigned ref_count, const unsigned i)
+ add(const size_t count,
+ const size_t count_ref,
+ const size_t i)
{
assert(i < N_);
- if (count > ref_count) {
- clipped_[i] += ref_count;
+ if (count > count_ref) {
+ clipped[i] += count_ref;
} else {
- clipped_[i] += count;
+ clipped[i] += count;
}
- sum_[i] += count;
+ sum[i] += count;
}
inline void
- Zero()
+ zero()
{
- for (unsigned i = 0; i < N_; i++) {
- clipped_[i] = 0.;
- sum_[i] = 0.;
+ for (size_t i=0; i<N_; i++) {
+ clipped[i] = 0.;
+ sum[i] = 0.;
}
}
inline void
- One()
+ one()
{
- for (unsigned i = 0; i < N_; i++) {
- clipped_[i] = 1.;
- sum_[i] = 1.;
+ for (size_t i=0; i<N_; i++) {
+ clipped[i] = 1.;
+ sum[i] = 1.;
}
}
inline void
- Print()
- {
- for (unsigned i = 0; i < N_; i++) {
- cout << i+1 << "grams (clipped):\t" << clipped_[i] << endl;
- cout << i+1 << "grams:\t\t\t" << sum_[i] << endl;
- }
- }
-
- inline void Resize(unsigned N)
+ resize(size_t N)
{
if (N == N_) return;
else if (N > N_) {
- for (unsigned i = N_; i < N; i++) {
- clipped_[i] = 0.;
- sum_[i] = 0.;
+ for (size_t i = N_; i < N; i++) {
+ clipped[i] = 0.;
+ sum[i] = 0.;
}
} else { // N < N_
- for (unsigned i = N_-1; i > N-1; i--) {
- clipped_.erase(i);
- sum_.erase(i);
+ for (size_t i = N_-1; i > N-1; i--) {
+ clipped.erase(i);
+ sum.erase(i);
}
}
N_ = N;
}
};
-typedef map<vector<WordID>, unsigned> Ngrams;
+typedef map<vector<WordID>, size_t> Ngrams;
inline Ngrams
-make_ngrams(const vector<WordID>& s, const unsigned N)
+ngrams(const vector<WordID>& vw,
+ const size_t N)
{
- Ngrams ngrams;
+ Ngrams r;
vector<WordID> ng;
- for (size_t i = 0; i < s.size(); i++) {
+ for (size_t i=0; i<vw.size(); i++) {
ng.clear();
- for (unsigned j = i; j < min(i+N, s.size()); j++) {
- ng.push_back(s[j]);
- ngrams[ng]++;
+ for (size_t j=i; j<min(i+N, vw.size()); j++) {
+ ng.push_back(vw[j]);
+ r[ng]++;
}
}
- return ngrams;
+
+ return r;
}
inline NgramCounts
-make_ngram_counts(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned N)
+ngram_counts(const vector<WordID>& hyp,
+ const vector<Ngrams>& ngrams_ref,
+ const size_t N)
{
- Ngrams hyp_ngrams = make_ngrams(hyp, N);
- Ngrams ref_ngrams = make_ngrams(ref, N);
+ Ngrams ngrams_hyp = ngrams(hyp, N);
NgramCounts counts(N);
- Ngrams::iterator it;
- Ngrams::iterator ti;
- for (it = hyp_ngrams.begin(); it != hyp_ngrams.end(); it++) {
- ti = ref_ngrams.find(it->first);
- if (ti != ref_ngrams.end()) {
- counts.Add(it->second, ti->second, it->first.size() - 1);
- } else {
- counts.Add(it->second, 0, it->first.size() - 1);
+ Ngrams::iterator it;
+ for (it = ngrams_hyp.begin(); it != ngrams_hyp.end(); it++) {
+ size_t max_ref_count = 0;
+ for (const auto& r: ngrams_ref) { // avoid copying each reference's n-gram map
+ auto ti = r.find(it->first);
+ if (ti != r.end())
+ max_ref_count = max(max_ref_count, ti->second);
}
+ counts.add(it->second, min(it->second, max_ref_count), it->first.size()-1);
}
+
return counts;
}
-struct BleuScorer : public LocalScorer
+class Scorer
{
- score_t Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len);
- score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
- void Reset() {}
-};
+ protected:
+ const size_t N_;
+ vector<weight_t> w_;
+
+ public:
+ Scorer(size_t n): N_(n)
+ {
+ for (size_t i = 1; i <= N_; i++)
+ w_.push_back(1.0/N_);
+ }
-struct StupidBleuScorer : public LocalScorer
-{
- score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
- void Reset() {}
-};
+ inline bool
+ init(const vector<WordID>& hyp,
+ const vector<Ngrams>& reference_ngrams,
+ const vector<size_t>& reference_lengths,
+ size_t& hl,
+ size_t& rl,
+ size_t& M,
+ vector<weight_t>& v,
+ NgramCounts& counts)
+ {
+ hl = hyp.size();
+ if (hl == 0)
+ return false;
+ rl = best_match_length(hl, reference_lengths);
+ if (rl == 0)
+ return false;
+ counts = ngram_counts(hyp, reference_ngrams, N_);
+ if (rl < N_) {
+ M = rl;
+ for (size_t i = 0; i < M; i++) v.push_back(1/((weight_t)M));
+ } else {
+ M = N_;
+ v = w_;
+ }
-struct FixedStupidBleuScorer : public LocalScorer
-{
- score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
- void Reset() {}
+ return true;
+ }
+
+ inline weight_t
+ brevity_penalty(const size_t hl,
+ const size_t rl)
+ {
+ if (hl > rl)
+ return 1;
+
+ return exp(1 - (weight_t)rl/hl);
+ }
+
+ inline size_t
+ best_match_length(const size_t hl,
+ const vector<size_t>& reference_lengths)
+ {
+ size_t m;
+ if (reference_lengths.size() == 1) {
+ m = reference_lengths.front();
+ } else {
+ size_t i = 0, best_idx = 0;
+ size_t best = numeric_limits<size_t>::max();
+ for (auto l: reference_lengths) {
+ size_t d = (hl > l) ? hl-l : l-hl; // unsigned-safe distance
+ if (d < best) {
+ best_idx = i;
+ best = d;
+ }
+ i += 1;
+ }
+ m = reference_lengths[best_idx];
+ }
+
+ return m;
+ }
+
+ virtual weight_t
+ score(const vector<WordID>&,
+ const vector<Ngrams>&,
+ const vector<size_t>&) = 0;
+
+ virtual void
+ update_context(const vector<WordID>& /*hyp*/,
+ const vector<Ngrams>& /*reference_ngrams*/,
+ const vector<size_t>& /*reference_lengths*/,
+ weight_t /*decay*/) {}
};
-struct SmoothBleuScorer : public LocalScorer
+/*
+ * ['fixed'] per-sentence BLEU
+ * simply adds 'fix' (1) to the reference length when computing the BP,
+ * to counteract overly short translations
+ *
+ * as in "Optimizing for Sentence-Level BLEU+1
+ * Yields Short Translations"
+ * (Nakov et al. '12)
+ *
+ */
+class NakovBleuScorer : public Scorer
{
- score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
- void Reset() {}
+ weight_t fix;
+
+ public:
+ NakovBleuScorer(size_t n, weight_t fix) : Scorer(n), fix(fix) {}
+
+ weight_t
+ score(const vector<WordID>& hyp,
+ const vector<Ngrams>& reference_ngrams,
+ const vector<size_t>& reference_lengths)
+ {
+ size_t hl, rl, M;
+ vector<weight_t> v;
+ NgramCounts counts;
+ if (!init(hyp, reference_ngrams, reference_lengths, hl, rl, M, v, counts))
+ return 0.;
+ weight_t sum=0, add=0;
+ for (size_t i=0; i<M; i++) {
+ if (i == 0 && (counts.sum[i]==0 || counts.clipped[i]==0)) return 0.;
+ if (i > 0) add = 1;
+ sum += v[i] * log(((weight_t)counts.clipped[i] + add)
+ / ((counts.sum[i] + add)));
+ }
+
+ return brevity_penalty(hl, rl+fix) * exp(sum); // apply the configured 'fix'
+ }
};
-struct SumBleuScorer : public LocalScorer
+/*
+ * BLEU
+ * 0 if the count for any n \in {1..N} is 0
+ *
+ * as in "BLEU: a Method for Automatic Evaluation
+ * of Machine Translation"
+ * (Papineni et al. '02)
+ *
+ */
+class PapineniBleuScorer : public Scorer
{
- score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
- void Reset() {}
+ public:
+ PapineniBleuScorer(size_t n) : Scorer(n) {}
+
+ weight_t
+ score(const vector<WordID>& hyp,
+ const vector<Ngrams>& reference_ngrams,
+ const vector<size_t>& reference_lengths)
+ {
+ size_t hl, rl, M;
+ vector<weight_t> v;
+ NgramCounts counts;
+ if (!init(hyp, reference_ngrams, reference_lengths, hl, rl, M, v, counts))
+ return 0.;
+ weight_t sum = 0;
+ for (size_t i=0; i<M; i++) {
+ if (counts.sum[i] == 0 || counts.clipped[i] == 0) return 0.;
+ sum += v[i] * log((weight_t)counts.clipped[i]/counts.sum[i]);
+ }
+
+ return brevity_penalty(hl, rl) * exp(sum);
+ }
};
-struct SumExpBleuScorer : public LocalScorer
+/*
+ * original BLEU+1
+ * 0 iff there is no 1-gram match ('grounded')
+ *
+ * as in "ORANGE: a Method for Evaluating
+ * Automatic Evaluation Metrics
+ * for Machine Translation"
+ * (Lin & Och '04)
+ *
+ */
+class LinBleuScorer : public Scorer
{
- score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
- void Reset() {}
+ public:
+ LinBleuScorer(size_t n) : Scorer(n) {}
+
+ weight_t
+ score(const vector<WordID>& hyp,
+ const vector<Ngrams>& reference_ngrams,
+ const vector<size_t>& reference_lengths)
+ {
+ size_t hl, rl, M;
+ vector<weight_t> v;
+ NgramCounts counts;
+ if (!init(hyp, reference_ngrams, reference_lengths, hl, rl, M, v, counts))
+ return 0.;
+ weight_t sum=0, add=0;
+ for (size_t i=0; i<M; i++) {
+ if (i == 0 && (counts.sum[i]==0 || counts.clipped[i]==0)) return 0.;
+ if (i == 1) add = 1;
+ sum += v[i] * log(((weight_t)counts.clipped[i] + add)
+ / ((counts.sum[i] + add)));
+ }
+
+ return brevity_penalty(hl, rl) * exp(sum);
+ }
};
-struct SumWhateverBleuScorer : public LocalScorer
+/*
+ * smooth BLEU
+ * max is 0.9375 (with N=4)
+ *
+ * as in "An End-to-End Discriminative Approach
+ * to Machine Translation"
+ * (Liang et al. '06)
+ *
+ */
+class LiangBleuScorer : public Scorer
{
- score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
- void Reset() {};
+ public:
+ LiangBleuScorer(size_t n) : Scorer(n) {}
+
+ weight_t
+ score(const vector<WordID>& hyp,
+ const vector<Ngrams>& reference_ngrams,
+ const vector<size_t>& reference_lengths)
+ {
+ size_t hl=hyp.size(), rl=best_match_length(hl, reference_lengths);
+ if (hl == 0 || rl == 0) return 0.;
+ NgramCounts counts = ngram_counts(hyp, reference_ngrams, N_);
+ size_t M = N_;
+ if (rl < N_) M = rl;
+ weight_t sum = 0.;
+ vector<weight_t> i_bleu;
+ for (size_t i=0; i<M; i++)
+ i_bleu.push_back(0.);
+ for (size_t i=0; i<M; i++) {
+ if (counts.sum[i]==0 || counts.clipped[i]==0) {
+ break;
+ } else {
+ weight_t i_score = log((weight_t)counts.clipped[i]/counts.sum[i]);
+ for (size_t j=i; j<M; j++) {
+ i_bleu[j] += (1/((weight_t)j+1)) * i_score;
+ }
+ }
+ sum += exp(i_bleu[i])/pow(2.0, (double)(N_-i));
+ }
+
+ return brevity_penalty(hl, rl) * sum;
+ }
};
-struct ApproxBleuScorer : public BleuScorer
+/*
+ * approx. BLEU
+ * Needs some additional code in dtrain.cc.
+ * We do not scale by source length, as hypotheses are compared only
+ * within single k-best lists, not globally (as in batch algorithms).
+ * TODO: reset after one iteration?
+ * TODO: maybe scale by source length?
+ *
+ * as in "Online Large-Margin Training of Syntactic
+ * and Structural Translation Features"
+ * (Chiang et al. '08)
+ *
+ */
+class ChiangBleuScorer : public Scorer
{
- NgramCounts glob_onebest_counts_;
- unsigned glob_hyp_len_, glob_ref_len_, glob_src_len_;
- score_t discount_;
-
- ApproxBleuScorer(unsigned N, score_t d) : glob_onebest_counts_(NgramCounts(N)), discount_(d)
- {
- glob_hyp_len_ = glob_ref_len_ = glob_src_len_ = 0;
- }
+ private:
+ NgramCounts context;
+ weight_t hyp_sz_sum;
+ weight_t ref_sz_sum;
+
+ public:
+ ChiangBleuScorer(size_t n) :
+ Scorer(n), context(n), hyp_sz_sum(0), ref_sz_sum(0) {}
+
+ weight_t
+ score(const vector<WordID>& hyp,
+ const vector<Ngrams>& reference_ngrams,
+ const vector<size_t>& reference_lengths)
+ {
+ size_t hl, rl, M;
+ vector<weight_t> v;
+ NgramCounts counts;
+ if (!init(hyp, reference_ngrams, reference_lengths, hl, rl, M, v, counts))
+ return 0.;
+ counts += context;
+ weight_t sum = 0;
+ for (size_t i = 0; i < M; i++) {
+ if (counts.sum[i]==0 || counts.clipped[i]==0) return 0.;
+ sum += v[i] * log((weight_t)counts.clipped[i] / counts.sum[i]);
+ }
- inline void Reset() {
- glob_onebest_counts_.Zero();
- glob_hyp_len_ = glob_ref_len_ = glob_src_len_ = 0.;
- }
+ return brevity_penalty(hyp_sz_sum+hl, ref_sz_sum+rl) * exp(sum);
+ }
- score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned rank, const unsigned src_len);
+ void
+ update_context(const vector<WordID>& hyp,
+ const vector<Ngrams>& reference_ngrams,
+ const vector<size_t>& reference_lengths,
+ weight_t decay=0.9)
+ {
+ size_t hl, rl, M;
+ vector<weight_t> v;
+ NgramCounts counts;
+ if (!init(hyp, reference_ngrams, reference_lengths, hl, rl, M, v, counts))
+ return; // nothing to add to the context
+
+ context += counts;
+ context *= decay;
+ hyp_sz_sum += hl;
+ hyp_sz_sum *= decay;
+ ref_sz_sum += rl;
+ ref_sz_sum *= decay;
+ }
};
-struct LinearBleuScorer : public BleuScorer
+/*
+ * 'sum' BLEU
+ *
+ * Merely sums up n-gram precisions
+ */
+class SumBleuScorer : public Scorer
{
- unsigned onebest_len_;
- NgramCounts onebest_counts_;
-
- LinearBleuScorer(unsigned N) : onebest_len_(1), onebest_counts_(N)
- {
- onebest_counts_.One();
- }
-
- score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned rank, const unsigned /*src_len*/);
+ public:
+ SumBleuScorer(size_t n) : Scorer(n) {}
+
+ weight_t
+ score(const vector<WordID>& hyp,
+ const vector<Ngrams>& reference_ngrams,
+ const vector<size_t>& reference_lengths)
+ {
+ size_t hl, rl, M;
+ vector<weight_t> v;
+ NgramCounts counts;
+ if (!init(hyp, reference_ngrams, reference_lengths, hl, rl, M, v, counts))
+ return 0.;
+ weight_t sum = 0.;
+ size_t j = 1;
+ for (size_t i=0; i<M; i++) {
+ if (counts.sum[i]==0 || counts.clipped[i]==0) break;
+ sum += ((weight_t)counts.clipped[i]/counts.sum[i])
+ / pow(2.0, (weight_t) (N_-j+1));
+ //sum += exp(((weight_t)counts.clipped[i]/counts.sum[i]))
+ // / pow(2.0, (weight_t) (N_-j+1));
+ //sum += exp(v[i] * log(((weight_t)counts.clipped[i]/counts.sum[i])))
+ // / pow(2.0, (weight_t) (N_-j+1));
+ j++;
+ }
- inline void Reset() {
- onebest_len_ = 1;
- onebest_counts_.One();
- }
+ return brevity_penalty(hl, rl) * sum;
+ }
};
+/*
+ * Linear (Corpus) BLEU
+ * TODO
+ *
+ * as in "Lattice Minimum Bayes-Risk Decoding
+ * for Statistical Machine Translation"
+ * (Tromble et al. '08)
+ * or "Hope and fear for discriminative training of
+ * statistical translation models"
+ * (Chiang '12)
+ *
+ */
} // namespace
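To make the refactored interface concrete: per-sentence BLEU here is
BP(hl, rl) * exp(sum_n w_n log p_n), with p_n the clipped n-gram precisions
computed against all references at once. A toy example (illustrative only;
WordID is assumed to be an integral token id, as elsewhere in cdec):

    vector<WordID> hyp = {1, 2, 3, 4};
    vector<WordID> ref = {1, 2, 3, 5};
    vector<Ngrams> ref_ngrams = { ngrams(ref, 4) };
    vector<size_t> ref_lengths = { ref.size() };
    // precisions: 3/4 unigrams, 2/3 bigrams, 1/2 trigrams, 0/1 4-grams
    PapineniBleuScorer bleu(4);
    cerr << bleu.score(hyp, ref_ngrams, ref_lengths) << endl;  // 0 (a count is 0)
    NakovBleuScorer bleu1(4, 1);
    cerr << bleu1.score(hyp, ref_ngrams, ref_lengths) << endl; // > 0 (BLEU+1)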
diff --git a/training/dtrain/update.h b/training/dtrain/update.h
new file mode 100644
index 00000000..405a3f76
--- /dev/null
+++ b/training/dtrain/update.h
@@ -0,0 +1,235 @@
+#ifndef _DTRAIN_UPDATE_H_
+#define _DTRAIN_UPDATE_H_
+
+namespace dtrain
+{
+
+/*
+ * multipartite [multi=3] ranking
+ * partitions are determined by the 'cut' parameter
+ * 0. sort sample (descending) by BLEU
+ * 1. compare top X (=sz*cut) to middle Y (=sz-2*(sz*cut)) and bottom X
+ * 2. compare middle Y to bottom X
+ *
+ */
+inline size_t
+updates_multipartite(vector<Hyp>* sample,
+ SparseVector<weight_t>& updates,
+ weight_t cut,
+ weight_t margin,
+ size_t max_up,
+ weight_t threshold,
+ bool adjust,
+ WriteFile& output,
+ size_t id)
+{
+ size_t up = 0;
+ size_t sz = sample->size();
+ if (sz < 2) return 0;
+ sort(sample->begin(), sample->end(), [](const Hyp& first, const Hyp& second)
+ {
+ return first.gold > second.gold;
+ });
+ size_t sep = round(sz*cut);
+
+ size_t sep_hi = sep;
+ if (adjust) {
+ if (sz > 4) {
+ while (sep_hi>0 && sep_hi<sz && (*sample)[sep_hi-1].gold==(*sample)[sep_hi].gold)
+ ++sep_hi;
+ } else {
+ sep_hi = 1;
+ }
+ }
+ for (size_t i = 0; i < sep_hi; i++) {
+ for (size_t j = sep_hi; j < sz; j++) {
+ Hyp &first=(*sample)[i], &second=(*sample)[j]; // make 'second' a reference, not a copy
+ if ((first.model-second.model)>margin
+ || (first.gold==second.gold)
+ || (threshold && (first.gold-second.gold < threshold)))
+ continue;
+ if (output)
+ *output << id << "\t" << first.f-second.f << endl;
+ updates += first.f-second.f;
+ if (++up==max_up)
+ return up;
+ }
+ }
+
+ size_t sep_lo = sz-sep;
+ if (adjust) {
+ while (sep_lo>0 && sep_lo<sz && (*sample)[sep_lo-1].gold==(*sample)[sep_lo].gold)
+ --sep_lo;
+ }
+ for (size_t i = sep_hi; i < sep_lo; i++) {
+ for (size_t j = sep_lo; j < sz; j++) {
+ Hyp &first=(*sample)[i], &second=(*sample)[j]; // make 'second' a reference, not a copy
+ if ((first.model-second.model)>margin
+ || (first.gold==second.gold)
+ || (threshold && (first.gold-second.gold < threshold)))
+ continue;
+ if (output)
+ *output << id << "\t" << first.f-second.f << endl;
+ updates += first.f-second.f;
+ if (++up==max_up)
+ return up; // stop at the cap, as in the first loop
+ }
+ }
+
+ return up;
+}
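+
+// Worked example (illustrative only): sz=10, cut=0.1, adjust=false gives
+// sep=sep_hi=1 and sep_lo=9, i.e. top=[0,1), middle=[1,9), bottom=[9,10);
+// the first loop pairs hypothesis 0 with 1..9, the second pairs 1..8 with 9.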
+
+/*
+ * all pairs
+ * only ignore a pair if gold scores are
+ * identical
+ *
+ */
+inline size_t
+updates_all(vector<Hyp>* sample,
+ SparseVector<weight_t>& updates,
+ size_t max_up,
+ weight_t threshold,
+ WriteFile& output,
+ size_t id)
+{
+ size_t up = 0;
+ size_t sz = sample->size();
+ if (sz < 2) return 0; // guard the unsigned sz-1 below
+ sort(sample->begin(), sample->end(), [](const Hyp& first, const Hyp& second)
+ {
+ return first.gold > second.gold;
+ });
+ for (size_t i = 0; i < sz-1; i++) {
+ for (size_t j = i+1; j < sz; j++) {
+ Hyp &first=(*sample)[i], &second=(*sample)[j]; // make 'second' a reference, not a copy
+ if ((first.gold == second.gold)
+ || (threshold && (first.gold-second.gold < threshold)))
+ continue;
+ if (output)
+ *output << id << "\t" << first.f-second.f << endl;
+ updates += first.f-second.f;
+ if (++up==max_up)
+ return up; // stop at the cap instead of only leaving the inner loop
+ }
+ }
+
+ return up;
+}
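+
+// Example (illustrative only): sz=3 with pairwise-distinct gold scores yields
+// the pairs (0,1), (0,2), (1,2); the number of updates grows as O(sz^2).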
+
+/*
+ * hope/fear
+ * just one pair: hope - fear
+ *
+ */
+inline size_t
+update_structured(vector<Hyp>* sample,
+ SparseVector<weight_t>& updates,
+ weight_t margin,
+ WriteFile& output,
+ size_t id)
+{
+ // hope
+ sort(sample->begin(), sample->end(), [](const Hyp& first, const Hyp& second)
+ {
+ return (first.model+first.gold) > (second.model+second.gold);
+ });
+ Hyp hope = (*sample)[0];
+ // fear
+ sort(sample->begin(), sample->end(), [](const Hyp& first, const Hyp& second)
+ {
+ return (first.model-first.gold) > (second.model-second.gold);
+ });
+ Hyp fear = (*sample)[0];
+
+ if (hope.gold != fear.gold) {
+ updates += hope.f - fear.f;
+ if (output)
+ *output << id << "\t" << hope.f << "\t" << fear.f << endl;
+
+ return 1;
+ }
+
+ if (output)
+ *output << endl;
+
+ return 0;
+}
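+
+// Numeric example (illustrative only): A(model=1.0, gold=0.9) and
+// B(model=1.5, gold=0.2): hope = argmax(model+gold) = A (1.9 vs 1.7),
+// fear = argmax(model-gold) = B (1.3 vs 0.1), so the update is f(A)-f(B).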
+
+
+/*
+ * pair sampling as in
+ * 'Tuning as Ranking' (Hopkins & May, 2011)
+ * count = 5000 [maxs]
+ * threshold = 5% BLEU [threshold=0.05]
+ * cut = top 50 [max_up]
+ */
+inline size_t
+updates_pro(vector<Hyp>* sample,
+ SparseVector<weight_t>& updates,
+ size_t maxs,
+ size_t max_up,
+ weight_t threshold,
+ WriteFile& output,
+ size_t id)
+{
+ size_t sz = sample->size(), s = 0; // s must start at 0
+ if (sz < 2) return 0;
+ vector<pair<Hyp*,Hyp*> > g;
+ while (s < maxs) {
+ size_t i=rand()%sz, j=rand()%sz;
+ Hyp &first=(*sample)[i], &second=(*sample)[j]; // both references: we store their addresses below
+ if (i==j || fabs(first.gold-second.gold)<threshold)
+ continue;
+ if (first.gold > second.gold)
+ g.emplace_back(make_pair(&first,&second));
+ else
+ g.emplace_back(make_pair(&second,&first));
+ s++;
+ }
+
+ if (g.size() > max_up) {
+ sort(g.begin(), g.end(), [](const pair<Hyp*,Hyp*>& a, const pair<Hyp*,Hyp*>& b)
+ {
+ return fabs(a.first->gold-a.second->gold)
+ > fabs(b.first->gold-b.second->gold);
+ });
+ g.erase(g.begin()+max_up, g.end());
+ }
+
+ for (auto i: g) {
+ if (output)
+ *output << id << "\t" << i.first->f-i.second->f << endl;
+ updates += i.first->f-i.second->f;
+ }
+
+ return g.size();
+}
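+
+// Illustrative call with the Hopkins & May defaults quoted above:
+// updates_pro(&sample, updates, 5000, 50, 0.05, output, id);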
+
+/*
+ * output (sorted) items in sample (k-best list)
+ *
+ */
+inline void
+output_sample(vector<Hyp>* sample,
+ WriteFile& output,
+ size_t id=0,
+ bool sorted=true)
+{
+ if (sorted) {
+ sort(sample->begin(), sample->end(), [](const Hyp& first, const Hyp& second)
+ {
+ return first.gold > second.gold;
+ });
+ }
+ size_t j = 0;
+ for (const auto& k: *sample) { // avoid copying each Hyp
+ *output << id << "\t" << j << "\t" << k.gold << "\t" << k.model
+ << "\t" << k.f << endl;
+ j++;
+ }
+}
+
+} // namespace
+
+#endif
+
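Putting sample.h, score.h and update.h together, one pass of training reduces
to a loop like the following sketch (the real driver in dtrain.cc handles I/O,
iterations and learning-rate options; 'eta', 'inputs' and the default-constructed
WriteFile are assumptions made for illustration):

    SparseVector<weight_t> lambdas;    // model weights
    WriteFile output;                  // unset here: pair output disabled
    size_t id = 0;
    for (auto& src: inputs) {
      decoder.Decode(src, &sampler);   // k-best list lands in sampler.sample
      SparseVector<weight_t> updates;
      update_structured(&sampler.sample, updates, 1.0, output, id++);
      updates *= eta;                  // scale by learning rate
      lambdas += updates;              // perceptron-style step
    }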
diff --git a/training/mira/kbest_cut_mira.cc b/training/mira/kbest_cut_mira.cc
index 5d8385c2..353ebe0e 100644
--- a/training/mira/kbest_cut_mira.cc
+++ b/training/mira/kbest_cut_mira.cc
@@ -96,7 +96,7 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) {
("weights_output,O",po::value<string>(),"Directory to write weights to")
("output_dir,D",po::value<string>(),"Directory to place output in")
("decoder_config,c",po::value<string>(),"Decoder configuration file")
- ("verbose,v",po::value<bool>()->zero_tokens(),"verbose stderr output");
+ ("verbose,v",po::value<bool>()->zero_tokens(),"Verbose stderr output");
po::options_description clo("Command line options");
clo.add_options()
("config", po::value<string>(), "Configuration file")
@@ -104,7 +104,7 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) {
po::options_description dconfig_options, dcmdline_options;
dconfig_options.add(opts);
dcmdline_options.add(opts).add(clo);
-
+
po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
if (conf->count("config")) {
ifstream config((*conf)["config"].as<string>().c_str());
@@ -229,14 +229,15 @@ void CuttingPlane(vector<boost::shared_ptr<HypothesisInfo> >* cur_c, bool* again
}
-double ComputeDelta(vector<boost::shared_ptr<HypothesisInfo> >* cur_p, double max_step_size,vector<weight_t> dense_weights )
+double ComputeDelta(vector<boost::shared_ptr<HypothesisInfo> >* cur_p, double max_step_size,vector<weight_t> dense_weights, bool verbose = true )
{
vector<boost::shared_ptr<HypothesisInfo> >& cur_pair = *cur_p;
double loss = cur_pair[0]->oracle_loss - cur_pair[1]->oracle_loss;
double margin = -(cur_pair[0]->oracleN->features.dot(dense_weights)- cur_pair[0]->features.dot(dense_weights)) + (cur_pair[1]->oracleN->features.dot(dense_weights) - cur_pair[1]->features.dot(dense_weights));
const double num = margin + loss;
- cerr << "LOSS: " << num << " Margin:" << margin << " BLEUL:" << loss << " " << cur_pair[1]->features.dot(dense_weights) << " " << cur_pair[0]->features.dot(dense_weights) <<endl;
+ if (verbose)
+ cerr << "LOSS: " << num << " Margin:" << margin << " BLEUL:" << loss << " " << cur_pair[1]->features.dot(dense_weights) << " " << cur_pair[0]->features.dot(dense_weights) <<endl;
SparseVector<double> diff = cur_pair[0]->features;
@@ -704,7 +705,8 @@ int main(int argc, char** argv) {
SparseVector<double> old_lambdas = lambdas;
tot.clear();
tot += lambdas;
- cerr << "PASS " << cur_pass << " " << endl << lambdas << endl;
+ if (VERBOSE)
+ cerr << "PASS " << cur_pass << " " << endl << lambdas << endl;
ScoreP acc, acc_h, acc_f;
while(*in) {
@@ -841,7 +843,7 @@ int main(int argc, char** argv) {
cur_pair.clear();
cur_pair.push_back(cur_constraint[j]);
cur_pair.push_back(cur_constraint[i]);
- double delta = ComputeDelta(&cur_pair,max_step_size, dense_weights);
+ double delta = ComputeDelta(&cur_pair,max_step_size, dense_weights, VERBOSE);
if (delta == 0) optimize_again = false;
cur_constraint[j]->alpha += delta;
@@ -865,7 +867,7 @@ int main(int argc, char** argv) {
}
else if(optimizer == 2 || optimizer == 3) //PA and Cutting Plane MIRA update
{
- bool DEBUG_SMO= true;
+ bool DEBUG_SMO= false;
vector<boost::shared_ptr<HypothesisInfo> > cur_constraint;
cur_constraint.push_back(cur_good_v[0]); //add oracle to constraint set
bool optimize_again = true;
@@ -914,7 +916,7 @@ int main(int argc, char** argv) {
continue;
} //pair is undefined so we are done with this smo
- double delta = ComputeDelta(&cur_pair,max_step_size, dense_weights);
+ double delta = ComputeDelta(&cur_pair,max_step_size, dense_weights, VERBOSE);
cur_pair[0]->alpha += delta;
cur_pair[1]->alpha -= delta;
@@ -928,7 +930,7 @@ int main(int argc, char** argv) {
//reload weights based on update
dense_weights.clear();
lambdas.init_vector(&dense_weights);
- if (dense_weights.size() < 500)
+ if (VERBOSE && dense_weights.size() < 500)
ShowLargestFeatures(dense_weights);
dense_w_local = dense_weights;
iter++;
@@ -968,12 +970,14 @@ int main(int argc, char** argv) {
for(int u=0;u!=cur_constraint.size();u++)
{
- cerr << "alpha=" << cur_constraint[u]->alpha << " hope=" << cur_constraint[u]->hope << " fear=" << cur_constraint[u]->fear << endl;
+ if (VERBOSE)
+ cerr << "alpha=" << cur_constraint[u]->alpha << " hope=" << cur_constraint[u]->hope << " fear=" << cur_constraint[u]->fear << endl;
temp_objective += cur_constraint[u]->alpha * cur_constraint[u]->fear;
}
objective += temp_objective;
- cerr << "SENT OBJ: " << temp_objective << " NEW OBJ: " << objective << endl;
+ if (VERBOSE)
+ cerr << "SENT OBJ: " << temp_objective << " NEW OBJ: " << objective << endl;
}
diff --git a/training/mira/kbest_mira.cc b/training/mira/kbest_mira.cc
index 2868de0c..07718a7f 100644
--- a/training/mira/kbest_mira.cc
+++ b/training/mira/kbest_mira.cc
@@ -57,7 +57,8 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) {
("sample_forest,f", "Instead of a k-best list, sample k hypotheses from the decoder's forest")
("sample_forest_unit_weight_vector,x", "Before sampling (must use -f option), rescale the weight vector used so it has unit length; this may improve the quality of the samples")
("random_seed,S", po::value<uint32_t>(), "Random seed (if not specified, /dev/random will be used)")
- ("decoder_config,c",po::value<string>(),"Decoder configuration file");
+ ("decoder_config,c",po::value<string>(),"Decoder configuration file")
+ ("verbose,v", po::value<bool>()->zero_tokens(), "verbose stderr output");
po::options_description clo("Command line options");
clo.add_options()
("config", po::value<string>(), "Configuration file")
@@ -188,6 +189,8 @@ int main(int argc, char** argv) {
po::variables_map conf;
if (!InitCommandLine(argc, argv, &conf)) return 1;
+ const bool VERBOSE = conf.count("verbose");
+
if (conf.count("random_seed"))
rng.reset(new MT19937(conf["random_seed"].as<uint32_t>()));
else
@@ -254,7 +257,8 @@ int main(int argc, char** argv) {
if ((cur_sent * 40 / corpus.size()) > dots) { ++dots; cerr << '.'; }
if (corpus.size() == cur_sent) {
cerr << " [AVG METRIC LAST PASS=" << (tot_loss / corpus.size()) << "]\n";
- Weights::ShowLargestFeatures(dense_weights);
+ if (VERBOSE)
+ Weights::ShowLargestFeatures(dense_weights);
cur_sent = 0;
tot_loss = 0;
dots = 0;
diff --git a/training/pro/pro.pl b/training/pro/pro.pl
index a059477d..0517a781 100755
--- a/training/pro/pro.pl
+++ b/training/pro/pro.pl
@@ -73,6 +73,7 @@ if (GetOptions(
"weights=s" => \$initial_weights,
"devset=s" => \$devset,
"jobs=i" => \$jobs,
+ "max-iterations=i" => \$max_iterations,
"metric=s" => \$metric,
"pass-suffix=s" => \$pass_suffix,
"qsub" => \$useqsub,