70 files changed, 1326 insertions, 2012 deletions
@@ -45,6 +45,7 @@ decoder/ff_test
 decoder/grammar_test
 decoder/hg_test
 decoder/logval_test
+decoder/minimal_decoder
 decoder/parser_test
 decoder/rule_lexer.cc
 decoder/small_vector_test
diff --git a/decoder/Makefile.am b/decoder/Makefile.am
index dbec532e..e313f1f9 100644
--- a/decoder/Makefile.am
+++ b/decoder/Makefile.am
@@ -1,4 +1,4 @@
-bin_PROGRAMS = cdec
+bin_PROGRAMS = cdec minimal_decoder
 
 noinst_PROGRAMS = \
   trule_test \
@@ -23,6 +23,9 @@ cdec_SOURCES = cdec.cc
 cdec_LDFLAGS= -rdynamic $(STATIC_FLAGS)
 cdec_LDADD = libcdec.a ../mteval/libmteval.a ../utils/libutils.a ../klm/search/libksearch.a ../klm/lm/libklm.a ../klm/util/libklm_util.a ../klm/util/double-conversion/libklm_util_double.a
 
+minimal_decoder_SOURCES = minimal_decoder.cc
+minimal_decoder_LDADD = libcdec.a ../utils/libutils.a
+
 AM_CPPFLAGS = -DTEST_DATA=\"$(top_srcdir)/decoder/test_data\" -DBOOST_TEST_DYN_LINK -W -Wno-sign-compare -I$(top_srcdir) -I$(top_srcdir)/mteval -I$(top_srcdir)/utils -I$(top_srcdir)/klm
 
 rule_lexer.cc: rule_lexer.ll
diff --git a/decoder/minimal_decoder.cc b/decoder/minimal_decoder.cc
new file mode 100644
index 00000000..a9eb36bc
--- /dev/null
+++ b/decoder/minimal_decoder.cc
@@ -0,0 +1,47 @@
+#include <fstream>
+#include <iostream>
+#include <sstream>
+
+#include "fdict.h"
+#include "filelib.h"
+#include "hg.h"
+#include "hg_io.h"
+#include "sparse_vector.h"
+#include "viterbi.h"
+
+
+using namespace std;
+
+/*
+ * Reads hypergraph from JSON file argv[1],
+ * reweights it using weights from argv[2],
+ * and outputs viterbi translation.
+ *
+ */
+int main(int argc, char** argv)
+{
+  ReadFile rf(argv[1]);
+  Hypergraph hg;
+  HypergraphIO::ReadFromBinary(rf.stream(), &hg);
+  SparseVector<double> v;
+  ifstream f(argv[2]);
+  string line;
+  while (getline(f, line)) {
+    istringstream ss(line);
+    string k; weight_t w;
+    ss >> k >> w;
+    v.add_value(FD::Convert(k), w);
+  }
+  hg.Reweight(v);
+  clock_t begin = clock();
+  hg.TopologicallySortNodesAndEdges(hg.NumberOfNodes()-1);
+  vector<WordID> trans;
+  ViterbiESentence(hg, &trans);
+  cout << TD::GetString(trans) << endl << flush;
+  clock_t end = clock();
+  double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
+  cout << elapsed_secs << " s" << endl;
+
+  return 0;
+}
+
diff --git a/extractor/Makefile.am b/extractor/Makefile.am
index a406d9dc..cdfbb307 100644
--- a/extractor/Makefile.am
+++ b/extractor/Makefile.am
@@ -115,7 +115,7 @@ noinst_LIBRARIES = libextractor.a
 sacompile_SOURCES = sacompile.cc
 sacompile_LDADD = libextractor.a
 
 run_extractor_SOURCES = run_extractor.cc
-run_extractor_LDADD = libextractor.a
+run_extractor_LDADD = libextractor.a ../utils/libutils.a
 extract_SOURCES = extract.cc
 extract_LDADD = libextractor.a
diff --git a/extractor/run_extractor.cc b/extractor/run_extractor.cc
index 00564a36..75fae627 100644
--- a/extractor/run_extractor.cc
+++ b/extractor/run_extractor.cc
@@ -33,6 +33,7 @@
 #include "time_util.h"
 #include "translation_table.h"
 #include "vocabulary.h"
+#include "../utils/filelib.h"
 
 namespace fs = boost::filesystem;
 namespace po = boost::program_options;
@@ -42,7 +43,7 @@ using namespace features;
 
 // Returns the file path in which a given grammar should be written.
 fs::path GetGrammarFilePath(const fs::path& grammar_path, int file_number) {
-  string file_name = "grammar." + to_string(file_number);
+  string file_name = "grammar."
+ to_string(file_number) + ".gz"; return grammar_path / file_name; } @@ -239,8 +240,8 @@ int main(int argc, char** argv) { } Grammar grammar = extractor.GetGrammar( sentences[i], blacklisted_sentence_ids); - ofstream output(GetGrammarFilePath(grammar_path, i).c_str()); - output << grammar; + WriteFile output(GetGrammarFilePath(grammar_path, i).c_str()); + *output << grammar; } for (size_t i = 0; i < sentences.size(); ++i) { diff --git a/patches/mira_hack_fix_wp.patch b/patches/mira_hack_fix_wp.patch new file mode 100644 index 00000000..dfe9965d --- /dev/null +++ b/patches/mira_hack_fix_wp.patch @@ -0,0 +1,38 @@ +diff --git a/training/mira/kbest_cut_mira.cc b/training/mira/kbest_cut_mira.cc +index 724b185..1224f0c 100644 +--- a/training/mira/kbest_cut_mira.cc ++++ b/training/mira/kbest_cut_mira.cc +@@ -750,6 +750,7 @@ int main(int argc, char** argv) { + } + // Regular mode or LEARN line from stream mode + //TODO: allow batch updating ++ lambdas[FD::Convert("WordPenalty")] = -1.0; // HACK WP + lambdas.init_vector(&dense_weights); + dense_w_local = dense_weights; + decoder.SetId(cur_sent); +@@ -781,7 +782,8 @@ int main(int argc, char** argv) { + acc_f->PlusEquals(*fear_sentscore); + + if(optimizer == 4) { //passive-aggresive update (single dual coordinate step) +- ++ ++ dense_weights[FD::Convert("WordPenalty")] = -1.0; // HACK WP + double margin = cur_bad.features.dot(dense_weights) - cur_good.features.dot(dense_weights); + double mt_loss = (cur_good.mt_metric - cur_bad.mt_metric); + const double loss = margin + mt_loss; +@@ -927,6 +929,7 @@ int main(int argc, char** argv) { + + //reload weights based on update + dense_weights.clear(); ++ lambdas[FD::Convert("WordPenalty")] = -1.0; // HACK WP + lambdas.init_vector(&dense_weights); + if (dense_weights.size() < 500) + ShowLargestFeatures(dense_weights); +@@ -1001,6 +1004,7 @@ int main(int argc, char** argv) { + ostringstream os; + os << weights_dir << "/weights.mira-pass" << (cur_pass < 10 ? "0" : "") << cur_pass << "." 
<< node_id << ".gz"; + string msg = "# MIRA tuned weights ||| " + boost::lexical_cast<std::string>(node_id) + " ||| " + boost::lexical_cast<std::string>(lcount); ++ lambdas[FD::Convert("WordPenalty")] = -1.0; // HACK WP + lambdas.init_vector(&dense_weights); + Weights::WriteToFile(os.str(), dense_weights, true, &msg); + diff --git a/training/dtrain/Makefile.am b/training/dtrain/Makefile.am index 844c790d..aadd376d 100644 --- a/training/dtrain/Makefile.am +++ b/training/dtrain/Makefile.am @@ -1,6 +1,6 @@ bin_PROGRAMS = dtrain -dtrain_SOURCES = dtrain.cc score.cc dtrain.h kbestget.h ksampler.h pairsampling.h score.h +dtrain_SOURCES = dtrain.cc dtrain.h sample.h update.h score.h dtrain_LDADD = ../../decoder/libcdec.a ../../klm/search/libksearch.a ../../mteval/libmteval.a ../../utils/libutils.a ../../klm/lm/libklm.a ../../klm/util/libklm_util.a ../../klm/util/double-conversion/libklm_util_double.a AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc index ccb50af2..1b7047b0 100644 --- a/training/dtrain/dtrain.cc +++ b/training/dtrain/dtrain.cc @@ -1,698 +1,227 @@ #include "dtrain.h" #include "score.h" -#include "kbestget.h" -#include "ksampler.h" -#include "pairsampling.h" +#include "sample.h" +#include "update.h" using namespace dtrain; - -bool -dtrain_init(int argc, char** argv, po::variables_map* cfg) -{ - po::options_description ini("Configuration File Options"); - ini.add_options() - ("input", po::value<string>(), "input file (src)") - ("refs,r", po::value<string>(), "references") - ("bitext,b", po::value<string>(), "bitext: 'src ||| tgt'") - ("output", po::value<string>()->default_value("-"), "output weights file, '-' for STDOUT") - ("input_weights", po::value<string>(), "input weights file (e.g. 
from previous iteration)") - ("decoder_config", po::value<string>(), "configuration file for cdec") - ("print_weights", po::value<string>(), "weights to print on each iteration") - ("stop_after", po::value<unsigned>()->default_value(0), "stop after X input sentences") - ("keep", po::value<bool>()->zero_tokens(), "keep weights files for each iteration") - ("epochs", po::value<unsigned>()->default_value(10), "# of iterations T (per shard)") - ("k", po::value<unsigned>()->default_value(100), "how many translations to sample") - ("sample_from", po::value<string>()->default_value("kbest"), "where to sample translations from: 'kbest', 'forest'") - ("filter", po::value<string>()->default_value("uniq"), "filter kbest list: 'not', 'uniq'") - ("pair_sampling", po::value<string>()->default_value("XYX"), "how to sample pairs: 'all', 'XYX' or 'PRO'") - ("hi_lo", po::value<float>()->default_value(0.1), "hi and lo (X) for XYX (default 0.1), <= 0.5") - ("pair_threshold", po::value<score_t>()->default_value(0.), "bleu [0,1] threshold to filter pairs") - ("N", po::value<unsigned>()->default_value(4), "N for Ngrams (BLEU)") - ("scorer", po::value<string>()->default_value("stupid_bleu"), "scoring: bleu, stupid_, smooth_, approx_, lc_") - ("learning_rate", po::value<weight_t>()->default_value(1.0), "learning rate") - ("gamma", po::value<weight_t>()->default_value(0.), "gamma for SVM (0 for perceptron)") - ("select_weights", po::value<string>()->default_value("last"), "output best, last, avg weights ('VOID' to throw away)") - ("rescale", po::value<bool>()->zero_tokens(), "rescale weight vector after each input") - ("l1_reg", po::value<string>()->default_value("none"), "apply l1 regularization as in 'Tsuroka et al' (2010) UNTESTED") - ("l1_reg_strength", po::value<weight_t>(), "l1 regularization strength") - ("fselect", po::value<weight_t>()->default_value(-1), "select top x percent (or by threshold) of features after each epoch NOT IMPLEMENTED") // TODO - ("approx_bleu_d", po::value<score_t>()->default_value(0.9), "discount for approx. BLEU") - ("scale_bleu_diff", po::value<bool>()->zero_tokens(), "learning rate <- bleu diff of a misranked pair") - ("loss_margin", po::value<weight_t>()->default_value(0.), "update if no error in pref pair but model scores this near") - ("max_pairs", po::value<unsigned>()->default_value(std::numeric_limits<unsigned>::max()), "max. 
# of pairs per Sent.") - ("pclr", po::value<string>()->default_value("no"), "use a (simple|adagrad) per-coordinate learning rate") - ("batch", po::value<bool>()->zero_tokens(), "do batch optimization") - ("repeat", po::value<unsigned>()->default_value(1), "repeat optimization over kbest list this number of times") - ("check", po::value<bool>()->zero_tokens(), "produce list of loss differentials") - ("noup", po::value<bool>()->zero_tokens(), "do not update weights"); - po::options_description cl("Command Line Options"); - cl.add_options() - ("config,c", po::value<string>(), "dtrain config file") - ("quiet,q", po::value<bool>()->zero_tokens(), "be quiet") - ("verbose,v", po::value<bool>()->zero_tokens(), "be verbose"); - cl.add(ini); - po::store(parse_command_line(argc, argv, cl), *cfg); - if (cfg->count("config")) { - ifstream ini_f((*cfg)["config"].as<string>().c_str()); - po::store(po::parse_config_file(ini_f, ini), *cfg); - } - po::notify(*cfg); - if (!cfg->count("decoder_config")) { - cerr << cl << endl; - return false; - } - if ((*cfg)["sample_from"].as<string>() != "kbest" - && (*cfg)["sample_from"].as<string>() != "forest") { - cerr << "Wrong 'sample_from' param: '" << (*cfg)["sample_from"].as<string>() << "', use 'kbest' or 'forest'." << endl; - return false; - } - if ((*cfg)["sample_from"].as<string>() == "kbest" && (*cfg)["filter"].as<string>() != "uniq" && - (*cfg)["filter"].as<string>() != "not") { - cerr << "Wrong 'filter' param: '" << (*cfg)["filter"].as<string>() << "', use 'uniq' or 'not'." << endl; - return false; - } - if ((*cfg)["pair_sampling"].as<string>() != "all" && (*cfg)["pair_sampling"].as<string>() != "XYX" && - (*cfg)["pair_sampling"].as<string>() != "PRO") { - cerr << "Wrong 'pair_sampling' param: '" << (*cfg)["pair_sampling"].as<string>() << "'." << endl; - return false; - } - if (cfg->count("hi_lo") && (*cfg)["pair_sampling"].as<string>() != "XYX") { - cerr << "Warning: hi_lo only works with pair_sampling XYX." << endl; - } - if ((*cfg)["hi_lo"].as<float>() > 0.5 || (*cfg)["hi_lo"].as<float>() < 0.01) { - cerr << "hi_lo must lie in [0.01, 0.5]" << endl; - return false; - } - if ((cfg->count("input")>0 || cfg->count("refs")>0) && cfg->count("bitext")>0) { - cerr << "Provide 'input' and 'refs' or 'bitext', not both." << endl; - return false; - } - if ((*cfg)["pair_threshold"].as<score_t>() < 0) { - cerr << "The threshold must be >= 0!" << endl; - return false; - } - if ((*cfg)["select_weights"].as<string>() != "last" && (*cfg)["select_weights"].as<string>() != "best" && - (*cfg)["select_weights"].as<string>() != "avg" && (*cfg)["select_weights"].as<string>() != "VOID") { - cerr << "Wrong 'select_weights' param: '" << (*cfg)["select_weights"].as<string>() << "', use 'last' or 'best'." 
<< endl; - return false; - } - return true; -} - int main(int argc, char** argv) { - // handle most parameters - po::variables_map cfg; - if (!dtrain_init(argc, argv, &cfg)) exit(1); // something is wrong - bool quiet = false; - if (cfg.count("quiet")) quiet = true; - bool verbose = false; - if (cfg.count("verbose")) verbose = true; - bool noup = false; - if (cfg.count("noup")) noup = true; - bool rescale = false; - if (cfg.count("rescale")) rescale = true; - bool keep = false; - if (cfg.count("keep")) keep = true; - - const unsigned k = cfg["k"].as<unsigned>(); - const unsigned N = cfg["N"].as<unsigned>(); - const unsigned T = cfg["epochs"].as<unsigned>(); - const unsigned stop_after = cfg["stop_after"].as<unsigned>(); - const string filter_type = cfg["filter"].as<string>(); - const string sample_from = cfg["sample_from"].as<string>(); - const string pair_sampling = cfg["pair_sampling"].as<string>(); - const score_t pair_threshold = cfg["pair_threshold"].as<score_t>(); - const string select_weights = cfg["select_weights"].as<string>(); - const float hi_lo = cfg["hi_lo"].as<float>(); - const score_t approx_bleu_d = cfg["approx_bleu_d"].as<score_t>(); - const unsigned max_pairs = cfg["max_pairs"].as<unsigned>(); - int repeat = cfg["repeat"].as<unsigned>(); - bool check = false; - if (cfg.count("check")) check = true; - weight_t loss_margin = cfg["loss_margin"].as<weight_t>(); - bool batch = false; - if (cfg.count("batch")) batch = true; - if (loss_margin > 9998.) loss_margin = std::numeric_limits<float>::max(); - bool scale_bleu_diff = false; - if (cfg.count("scale_bleu_diff")) scale_bleu_diff = true; - const string pclr = cfg["pclr"].as<string>(); - bool average = false; - if (select_weights == "avg") - average = true; + // get configuration + po::variables_map conf; + if (!dtrain_init(argc, argv, &conf)) + exit(1); // something is wrong + const size_t k = conf["k"].as<size_t>(); + const size_t N = conf["N"].as<size_t>(); + const size_t T = conf["iterations"].as<size_t>(); + const weight_t eta = conf["learning_rate"].as<weight_t>(); + const weight_t error_margin = conf["error_margin"].as<weight_t>(); + const bool average = conf["average"].as<bool>(); + const bool keep = conf["keep"].as<bool>(); + const weight_t l1_reg = conf["l1_reg"].as<weight_t>(); + const string output_fn = conf["output"].as<string>(); vector<string> print_weights; - if (cfg.count("print_weights")) - boost::split(print_weights, cfg["print_weights"].as<string>(), boost::is_any_of(" ")); + boost::split(print_weights, conf["print_weights"].as<string>(), boost::is_any_of(" ")); // setup decoder register_feature_functions(); SetSilent(true); - ReadFile ini_rf(cfg["decoder_config"].as<string>()); - if (!quiet) - cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl; - Decoder decoder(ini_rf.stream()); - - // scoring metric/scorer - string scorer_str = cfg["scorer"].as<string>(); - LocalScorer* scorer; - if (scorer_str == "bleu") { - scorer = static_cast<BleuScorer*>(new BleuScorer); - } else if (scorer_str == "stupid_bleu") { - scorer = static_cast<StupidBleuScorer*>(new StupidBleuScorer); - } else if (scorer_str == "fixed_stupid_bleu") { - scorer = static_cast<FixedStupidBleuScorer*>(new FixedStupidBleuScorer); - } else if (scorer_str == "smooth_bleu") { - scorer = static_cast<SmoothBleuScorer*>(new SmoothBleuScorer); - } else if (scorer_str == "sum_bleu") { - scorer = static_cast<SumBleuScorer*>(new SumBleuScorer); - } else if (scorer_str == "sumexp_bleu") { - scorer = 
static_cast<SumExpBleuScorer*>(new SumExpBleuScorer); - } else if (scorer_str == "sumwhatever_bleu") { - scorer = static_cast<SumWhateverBleuScorer*>(new SumWhateverBleuScorer); - } else if (scorer_str == "approx_bleu") { - scorer = static_cast<ApproxBleuScorer*>(new ApproxBleuScorer(N, approx_bleu_d)); - } else if (scorer_str == "lc_bleu") { - scorer = static_cast<LinearBleuScorer*>(new LinearBleuScorer(N)); - } else { - cerr << "Don't know scoring metric: '" << scorer_str << "', exiting." << endl; - exit(1); - } - vector<score_t> bleu_weights; - scorer->Init(N, bleu_weights); + ReadFile f(conf["decoder_config"].as<string>()); + Decoder decoder(f.stream()); // setup decoder observer - MT19937 rng; // random number generator, only for forest sampling - HypSampler* observer; - if (sample_from == "kbest") - observer = static_cast<KBestGetter*>(new KBestGetter(k, filter_type)); - else - observer = static_cast<KSampler*>(new KSampler(k, &rng)); - observer->SetScorer(scorer); - - // init weights + ScoredKbest* observer = new ScoredKbest(k, new PerSentenceBleuScorer(N)); + + // weights vector<weight_t>& decoder_weights = decoder.CurrentWeightVector(); - SparseVector<weight_t> lambdas, cumulative_penalties, w_average; - if (cfg.count("input_weights")) Weights::InitFromFile(cfg["input_weights"].as<string>(), &decoder_weights); - Weights::InitSparseVector(decoder_weights, &lambdas); - - // meta params for perceptron, SVM - weight_t eta = cfg["learning_rate"].as<weight_t>(); - weight_t gamma = cfg["gamma"].as<weight_t>(); - - // faster perceptron: consider only misranked pairs, see - bool faster_perceptron = false; - if (gamma==0 && loss_margin==0) faster_perceptron = true; - - // l1 regularization - bool l1naive = false; - bool l1clip = false; - bool l1cumul = false; - weight_t l1_reg = 0; - if (cfg["l1_reg"].as<string>() != "none") { - string s = cfg["l1_reg"].as<string>(); - if (s == "naive") l1naive = true; - else if (s == "clip") l1clip = true; - else if (s == "cumul") l1cumul = true; - l1_reg = cfg["l1_reg_strength"].as<weight_t>(); + SparseVector<weight_t> lambdas, w_average; + if (conf.count("input_weights")) { + Weights::InitFromFile(conf["input_weights"].as<string>(), &decoder_weights); + Weights::InitSparseVector(decoder_weights, &lambdas); } - // output - string output_fn = cfg["output"].as<string>(); // input - bool read_bitext = false; - string input_fn; - if (cfg.count("bitext")) { - read_bitext = true; - input_fn = cfg["bitext"].as<string>(); - } else { - input_fn = cfg["input"].as<string>(); - } + string input_fn = conf["bitext"].as<string>(); ReadFile input(input_fn); - // buffer input for t > 0 - vector<string> src_str_buf; // source strings (decoder takes only strings) - vector<vector<WordID> > ref_ids_buf; // references as WordID vecs - ReadFile refs; - string refs_fn; - if (!read_bitext) { - refs_fn = cfg["refs"].as<string>(); - refs.Init(refs_fn); - } - - unsigned in_sz = std::numeric_limits<unsigned>::max(); // input index, input size - vector<pair<score_t, score_t> > all_scores; - score_t max_score = 0.; - unsigned best_it = 0; - float overall_time = 0.; - - // output cfg - if (!quiet) { - cerr << _p5; - cerr << endl << "dtrain" << endl << "Parameters:" << endl; - cerr << setw(25) << "k " << k << endl; - cerr << setw(25) << "N " << N << endl; - cerr << setw(25) << "T " << T << endl; - cerr << setw(25) << "batch " << batch << endl; - cerr << setw(26) << "scorer '" << scorer_str << "'" << endl; - if (scorer_str == "approx_bleu") - cerr << setw(25) << "approx. 
B discount " << approx_bleu_d << endl; - cerr << setw(25) << "sample from " << "'" << sample_from << "'" << endl; - if (sample_from == "kbest") - cerr << setw(25) << "filter " << "'" << filter_type << "'" << endl; - if (!scale_bleu_diff) cerr << setw(25) << "learning rate " << eta << endl; - else cerr << setw(25) << "learning rate " << "bleu diff" << endl; - cerr << setw(25) << "gamma " << gamma << endl; - cerr << setw(25) << "loss margin " << loss_margin << endl; - cerr << setw(25) << "faster perceptron " << faster_perceptron << endl; - cerr << setw(25) << "pairs " << "'" << pair_sampling << "'" << endl; - if (pair_sampling == "XYX") - cerr << setw(25) << "hi lo " << hi_lo << endl; - cerr << setw(25) << "pair threshold " << pair_threshold << endl; - cerr << setw(25) << "select weights " << "'" << select_weights << "'" << endl; - if (cfg.count("l1_reg")) - cerr << setw(25) << "l1 reg " << l1_reg << " '" << cfg["l1_reg"].as<string>() << "'" << endl; - if (rescale) - cerr << setw(25) << "rescale " << rescale << endl; - cerr << setw(25) << "pclr " << pclr << endl; - cerr << setw(25) << "max pairs " << max_pairs << endl; - cerr << setw(25) << "repeat " << repeat << endl; - //cerr << setw(25) << "test k-best " << test_k_best << endl; - cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl; - cerr << setw(25) << "input " << "'" << input_fn << "'" << endl; - if (!read_bitext) - cerr << setw(25) << "refs " << "'" << refs_fn << "'" << endl; - cerr << setw(25) << "output " << "'" << output_fn << "'" << endl; - if (cfg.count("input_weights")) - cerr << setw(25) << "weights in " << "'" << cfg["input_weights"].as<string>() << "'" << endl; - if (stop_after > 0) - cerr << setw(25) << "stop_after " << stop_after << endl; - if (!verbose) cerr << "(a dot represents " << DTRAIN_DOTS << " inputs)" << endl; - } - - // pclr - SparseVector<weight_t> learning_rates; - // batch - SparseVector<weight_t> batch_updates; - score_t batch_loss; - - for (unsigned t = 0; t < T; t++) // T epochs + vector<string> buf; // source strings (decoder takes only strings) + vector<vector<Ngrams> > buf_ngs; // compute ngrams and lengths of references + vector<vector<size_t> > buf_ls; // just once + size_t input_sz = 0; + + // output configuration + cerr << _p5 << "dtrain" << endl << "Parameters:" << endl; + cerr << setw(25) << "k " << k << endl; + cerr << setw(25) << "N " << N << endl; + cerr << setw(25) << "T " << T << endl; + cerr << setw(25) << "learning rate " << eta << endl; + cerr << setw(25) << "error margin " << error_margin << endl; + cerr << setw(25) << "l1 reg " << l1_reg << endl; + cerr << setw(25) << "decoder conf " << "'" << conf["decoder_config"].as<string>() << "'" << endl; + cerr << setw(25) << "input " << "'" << input_fn << "'" << endl; + cerr << setw(25) << "output " << "'" << output_fn << "'" << endl; + if (conf.count("input_weights")) + cerr << setw(25) << "weights in " << "'" << conf["input_weights"].as<string>() << "'" << endl; + cerr << "(a dot per input)" << endl; + + // meta + weight_t best=0., gold_prev=0.; + size_t best_iteration = 0; + time_t total_time = 0.; + + for (size_t t = 0; t < T; t++) // T iterations { time_t start, end; time(&start); - score_t score_sum = 0.; - score_t model_sum(0); - unsigned ii = 0, rank_errors = 0, margin_violations = 0, npairs = 0, f_count = 0, list_sz = 0, kbest_loss_improve = 0; - batch_loss = 0.; - if (!quiet) cerr << "Iteration #" << t+1 << " of " << T << "." 
<< endl; + weight_t gold_sum=0., model_sum=0.; + size_t i = 0, num_pairs = 0, feature_count = 0, list_sz = 0; + + cerr << "Iteration #" << t+1 << " of " << T << "." << endl; while(true) { + bool next = true; - string in; - string ref; - bool next = false, stop = false; // next iteration or premature stop + // getting input if (t == 0) { - if(!getline(*input, in)) next = true; - if(read_bitext) { - vector<string> strs; - boost::algorithm::split_regex(strs, in, boost::regex(" \\|\\|\\| ")); - in = strs[0]; - ref = strs[1]; - } - } else { - if (ii == in_sz) next = true; // stop if we reach the end of our input - } - // stop after X sentences (but still go on for those) - if (stop_after > 0 && stop_after == ii && !next) stop = true; - - // produce some pretty output - if (!quiet && !verbose) { - if (ii == 0) cerr << " "; - if ((ii+1) % (DTRAIN_DOTS) == 0) { - cerr << "."; - cerr.flush(); - } - if ((ii+1) % (20*DTRAIN_DOTS) == 0) { - cerr << " " << ii+1 << endl; - if (!next && !stop) cerr << " "; - } - if (stop) { - if (ii % (20*DTRAIN_DOTS) != 0) cerr << " " << ii << endl; - cerr << "Stopping after " << stop_after << " input sentences." << endl; + string in; + if(!getline(*input, in)) { + next = false; } else { - if (next) { - if (ii % (20*DTRAIN_DOTS) != 0) cerr << " " << ii << endl; + vector<string> parts; + boost::algorithm::split_regex(parts, in, boost::regex(" \\|\\|\\| ")); + buf.push_back(parts[0]); + parts.erase(parts.begin()); + buf_ngs.push_back({}); + buf_ls.push_back({}); + for (auto s: parts) { + vector<WordID> r; + vector<string> tok; + boost::split(tok, s, boost::is_any_of(" ")); + RegisterAndConvert(tok, r); + buf_ngs.back().emplace_back(MakeNgrams(r, N)); + buf_ls.back().push_back(r.size()); } } - } - - // next iteration - if (next || stop) break; - - // weights - lambdas.init_vector(&decoder_weights); - - // getting input - vector<WordID> ref_ids; // reference as vector<WordID> - if (t == 0) { - if (!read_bitext) { - getline(*refs, ref); - } - vector<string> ref_tok; - boost::split(ref_tok, ref, boost::is_any_of(" ")); - register_and_convert(ref_tok, ref_ids); - ref_ids_buf.push_back(ref_ids); - src_str_buf.push_back(in); } else { - ref_ids = ref_ids_buf[ii]; + next = i<input_sz; } - observer->SetRef(ref_ids); - if (t == 0) - decoder.Decode(in, observer); - else - decoder.Decode(src_str_buf[ii], observer); - // get (scored) samples + // produce some pretty output + if (i == 0 || (i+1)%20==0) + cerr << " "; + cerr << "."; + cerr.flush(); + if (!next) + if (i%20 != 0) cerr << " " << i << endl; + + // stop iterating + if (!next) break; + + // decode + if (t > 0 || i > 0) + lambdas.init_vector(&decoder_weights); + observer->SetReference(buf_ngs[i], buf_ls[i]); + decoder.Decode(buf[i], observer); vector<ScoredHyp>* samples = observer->GetSamples(); - if (verbose) { - cerr << "--- ref for " << ii << ": "; - if (t > 0) printWordIDVec(ref_ids_buf[ii]); - else printWordIDVec(ref_ids); - cerr << endl; - for (unsigned u = 0; u < samples->size(); u++) { - cerr << _p2 << _np << "[" << u << ". 
'"; - printWordIDVec((*samples)[u].w); - cerr << "'" << endl; - cerr << "SCORE=" << (*samples)[u].score << ",model="<< (*samples)[u].model << endl; - cerr << "F{" << (*samples)[u].f << "} ]" << endl << endl; - } - } - - if (repeat == 1) { - score_sum += (*samples)[0].score; // stats for 1best - model_sum += (*samples)[0].model; - } - - f_count += observer->get_f_count(); - list_sz += observer->get_sz(); - - // weight updates - if (!noup) { - // get pairs - vector<pair<ScoredHyp,ScoredHyp> > pairs; - if (pair_sampling == "all") - all_pairs(samples, pairs, pair_threshold, max_pairs, faster_perceptron); - if (pair_sampling == "XYX") - partXYX(samples, pairs, pair_threshold, max_pairs, faster_perceptron, hi_lo); - if (pair_sampling == "PRO") - PROsampling(samples, pairs, pair_threshold, max_pairs); - int cur_npairs = pairs.size(); - npairs += cur_npairs; - - score_t kbest_loss_first = 0.0, kbest_loss_last = 0.0; - - if (check) repeat = 2; - vector<float> losses; // for check - - for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin(); - it != pairs.end(); it++) { - score_t model_diff = it->first.model - it->second.model; - score_t loss = max(0.0, -1.0 * model_diff); - losses.push_back(loss); - kbest_loss_first += loss; - } - - score_t kbest_loss = 0.0; - for (int ki=0; ki < repeat; ki++) { - - SparseVector<weight_t> lambdas_copy; // for l1 regularization - SparseVector<weight_t> sum_up; // for pclr - if (l1naive||l1clip||l1cumul) lambdas_copy = lambdas; - - unsigned pair_idx = 0; // for check - for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin(); - it != pairs.end(); it++) { - score_t model_diff = it->first.model - it->second.model; - score_t loss = max(0.0, -1.0 * model_diff); - - if (check && ki==repeat-1) cout << losses[pair_idx] - loss << endl; - pair_idx++; - - if (repeat > 1) { - model_diff = lambdas.dot(it->first.f) - lambdas.dot(it->second.f); - kbest_loss += loss; - } - bool rank_error = false; - score_t margin; - if (faster_perceptron) { // we only have considering misranked pairs - rank_error = true; // pair sampling already did this for us - margin = std::numeric_limits<float>::max(); - } else { - rank_error = model_diff<=0.0; - margin = fabs(model_diff); - if (!rank_error && margin < loss_margin) margin_violations++; - } - if (rank_error && ki==0) rank_errors++; - if (scale_bleu_diff) eta = it->first.score - it->second.score; - if (rank_error || margin < loss_margin) { - SparseVector<weight_t> diff_vec = it->first.f - it->second.f; - if (batch) { - batch_loss += max(0., -1.0 * model_diff); - batch_updates += diff_vec; - continue; - } - if (pclr != "no") { - sum_up += diff_vec; + // stats for 1best + gold_sum += samples->front().gold; + model_sum += samples->front().model; + feature_count += observer->GetFeatureCount(); + list_sz += observer->GetSize(); + + // get pairs and update + vector<pair<ScoredHyp,ScoredHyp> > pairs; + SparseVector<weight_t> updates; + num_pairs += CollectUpdates(samples, updates, error_margin); + SparseVector<weight_t> lambdas_copy; + if (l1_reg) + lambdas_copy = lambdas; + lambdas.plus_eq_v_times_s(updates, eta); + + // l1 regularization + // NB: regularization is done after each sentence, + // not after every single pair! + if (l1_reg) { + SparseVector<weight_t>::iterator it = lambdas.begin(); + for (; it != lambdas.end(); ++it) { + if (it->second == 0) continue; + if (!lambdas_copy.get(it->first) // new or.. 
+ || lambdas_copy.get(it->first)!=it->second) // updated feature + { + weight_t v = it->second; + if (v > 0) { + it->second = max(0., v - l1_reg); } else { - lambdas.plus_eq_v_times_s(diff_vec, eta); - if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./cur_npairs)); + it->second = min(0., v + l1_reg); } } } + } - // per-coordinate learning rate - if (pclr != "no") { - SparseVector<weight_t>::iterator it = sum_up.begin(); - for (; it != sum_up.end(); ++it) { - if (pclr == "simple") { - lambdas[it->first] += it->second / max(1.0, learning_rates[it->first]); - learning_rates[it->first]++; - } else if (pclr == "adagrad") { - if (learning_rates[it->first] == 0) { - lambdas[it->first] += it->second * eta; - } else { - lambdas[it->first] += it->second * eta * learning_rates[it->first]; - } - learning_rates[it->first] += pow(it->second, 2.0); - } - } - } - - // l1 regularization - // please note that this regularizations happen - // after a _sentence_ -- not after each example/pair! - if (l1naive) { - SparseVector<weight_t>::iterator it = lambdas.begin(); - for (; it != lambdas.end(); ++it) { - if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) { - it->second *= max(0.0000001, eta/(eta+learning_rates[it->first])); // FIXME - learning_rates[it->first]++; - it->second -= sign(it->second) * l1_reg; - } - } - } else if (l1clip) { - SparseVector<weight_t>::iterator it = lambdas.begin(); - for (; it != lambdas.end(); ++it) { - if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) { - if (it->second != 0) { - weight_t v = it->second; - if (v > 0) { - it->second = max(0., v - l1_reg); - } else { - it->second = min(0., v + l1_reg); - } - } - } - } - } else if (l1cumul) { - weight_t acc_penalty = (ii+1) * l1_reg; // ii is the index of the current input - SparseVector<weight_t>::iterator it = lambdas.begin(); - for (; it != lambdas.end(); ++it) { - if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) { - if (it->second != 0) { - weight_t v = it->second; - weight_t penalized = 0.; - if (v > 0) { - penalized = max(0., v-(acc_penalty + cumulative_penalties.get(it->first))); - } else { - penalized = min(0., v+(acc_penalty - cumulative_penalties.get(it->first))); - } - it->second = penalized; - cumulative_penalties.set_value(it->first, cumulative_penalties.get(it->first)+penalized); - } - } - } - } - - if (ki==repeat-1) { // done - kbest_loss_last = kbest_loss; - if (repeat > 1) { - score_t best_model = -std::numeric_limits<score_t>::max(); - unsigned best_idx = 0; - for (unsigned i=0; i < samples->size(); i++) { - score_t s = lambdas.dot((*samples)[i].f); - if (s > best_model) { - best_idx = i; - best_model = s; - } - } - score_sum += (*samples)[best_idx].score; - model_sum += best_model; - } - } - } // repeat - - if ((kbest_loss_first - kbest_loss_last) >= 0) kbest_loss_improve++; - - } // noup - - if (rescale) lambdas /= lambdas.l2norm(); - - ++ii; + i++; } // input loop - if (t == 0) in_sz = ii; // remember size of input (# lines) - + if (t == 0) + input_sz = i; // remember size of input (# lines) + + // update average + if (average) + w_average += lambdas; + + // stats + weight_t gold_avg = gold_sum/(weight_t)input_sz; + size_t non_zero = (size_t)lambdas.num_nonzero(); + cerr << _p5 << _p << "WEIGHTS" << endl; + for (auto name: print_weights) + cerr << setw(18) << name << " = " << lambdas.get(FD::Convert(name)) << endl; + cerr << " ---" << endl; + cerr << _np << " 1best avg score: " << gold_avg; + cerr << _p << " (" << 
gold_avg-gold_prev << ")" << endl; + cerr << _np << " 1best avg model score: " << model_sum/(weight_t)input_sz << endl; + cerr << " avg # pairs: "; + cerr << _np << num_pairs/(float)input_sz << endl; + cerr << " non-0 feature count: " << non_zero << endl; + cerr << " avg list sz: " << list_sz/(float)input_sz << endl; + cerr << " avg f count: " << feature_count/(float)list_sz << endl; + + if (gold_avg > best) { + best = gold_avg; + best_iteration = t; + } + gold_prev = gold_avg; - if (batch) { - lambdas.plus_eq_v_times_s(batch_updates, eta); - if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs)); - batch_updates.clear(); - } - - if (average) w_average += lambdas; - - if (scorer_str == "approx_bleu" || scorer_str == "lc_bleu") scorer->Reset(); - - // print some stats - score_t score_avg = score_sum/(score_t)in_sz; - score_t model_avg = model_sum/(score_t)in_sz; - score_t score_diff, model_diff; - if (t > 0) { - score_diff = score_avg - all_scores[t-1].first; - model_diff = model_avg - all_scores[t-1].second; - } else { - score_diff = score_avg; - model_diff = model_avg; - } - - unsigned nonz = 0; - if (!quiet) nonz = (unsigned)lambdas.num_nonzero(); - - if (!quiet) { - cerr << _p5 << _p << "WEIGHTS" << endl; - for (vector<string>::iterator it = print_weights.begin(); it != print_weights.end(); it++) { - cerr << setw(18) << *it << " = " << lambdas.get(FD::Convert(*it)) << endl; - } - cerr << " ---" << endl; - cerr << _np << " 1best avg score: " << score_avg; - cerr << _p << " (" << score_diff << ")" << endl; - cerr << _np << " 1best avg model score: " << model_avg; - cerr << _p << " (" << model_diff << ")" << endl; - cerr << " avg # pairs: "; - cerr << _np << npairs/(float)in_sz << endl; - cerr << " avg # rank err: "; - cerr << rank_errors/(float)in_sz; - if (faster_perceptron) cerr << " (meaningless)"; - cerr << endl; - cerr << " avg # margin viol: "; - cerr << margin_violations/(float)in_sz << endl; - if (batch) cerr << " batch loss: " << batch_loss << endl; - cerr << " k-best loss imp: " << ((float)kbest_loss_improve/in_sz)*100 << "%" << endl; - cerr << " non0 feature count: " << nonz << endl; - cerr << " avg list sz: " << list_sz/(float)in_sz << endl; - cerr << " avg f count: " << f_count/(float)list_sz << endl; - } - - pair<score_t,score_t> remember; - remember.first = score_avg; - remember.second = model_avg; - all_scores.push_back(remember); - if (score_avg > max_score) { - max_score = score_avg; - best_it = t; - } time (&end); - float time_diff = difftime(end, start); - overall_time += time_diff; - if (!quiet) { - cerr << _p2 << _np << "(time " << time_diff/60. << " min, "; - cerr << time_diff/in_sz << " s/S)" << endl; - } - if (t+1 != T && !quiet) cerr << endl; - - if (noup) break; + time_t time_diff = difftime(end, start); + total_time += time_diff; + cerr << _p2 << _np << "(time " << time_diff/60. << " min, "; + cerr << time_diff/input_sz << " s/S)" << endl; + if (t+1 != T) cerr << endl; - // write weights to file - if (select_weights == "best" || keep) { + if (keep) { // keep intermediate weights lambdas.init_vector(&decoder_weights); string w_fn = "weights." + boost::lexical_cast<string>(t) + ".gz"; Weights::WriteToFile(w_fn, decoder_weights, true); } - if (check) cout << "---" << endl; - } // outer loop - if (average) w_average /= (weight_t)T; - - if (!noup) { - if (!quiet) cerr << endl << "Writing weights file to '" << output_fn << "' ..." 
<< endl; - if (select_weights == "last" || average) { // last, average - WriteFile of(output_fn); // works with '-' - ostream& o = *of.stream(); - o.precision(17); - o << _np; - if (average) { - for (SparseVector<weight_t>::iterator it = w_average.begin(); it != w_average.end(); ++it) { - if (it->second == 0) continue; - o << FD::Convert(it->first) << '\t' << it->second << endl; - } - } else { - for (SparseVector<weight_t>::iterator it = lambdas.begin(); it != lambdas.end(); ++it) { - if (it->second == 0) continue; - o << FD::Convert(it->first) << '\t' << it->second << endl; - } - } - } else if (select_weights == "VOID") { // do nothing with the weights - } else { // best - if (output_fn != "-") { - CopyFile("weights."+boost::lexical_cast<string>(best_it)+".gz", output_fn); - } else { - ReadFile bestw("weights."+boost::lexical_cast<string>(best_it)+".gz"); - string o; - cout.precision(17); - cout << _np; - while(getline(*bestw, o)) cout << o << endl; - } - if (!keep) { - for (unsigned i = 0; i < T; i++) { - string s = "weights." + boost::lexical_cast<string>(i) + ".gz"; - unlink(s.c_str()); - } - } - } - if (!quiet) cerr << "done" << endl; + // final weights + if (average) { + w_average /= (weight_t)T; + w_average.init_vector(decoder_weights); + } else if (!keep) { + lambdas.init_vector(decoder_weights); } + Weights::WriteToFile(output_fn, decoder_weights, true); - if (!quiet) { - cerr << _p5 << _np << endl << "---" << endl << "Best iteration: "; - cerr << best_it+1 << " [SCORE '" << scorer_str << "'=" << max_score << "]." << endl; - cerr << "This took " << overall_time/60. << " min." << endl; - } + cerr << _p5 << _np << endl << "---" << endl << "Best iteration: "; + cerr << best_iteration+1 << " [GOLD = " << best << "]." << endl; + cerr << "This took " << total_time/60. << " min." << endl; + + return 0; } diff --git a/training/dtrain/dtrain.h b/training/dtrain/dtrain.h index 07bd9b65..728b0698 100644 --- a/training/dtrain/dtrain.h +++ b/training/dtrain/dtrain.h @@ -1,9 +1,6 @@ #ifndef _DTRAIN_H_ #define _DTRAIN_H_ -#define DTRAIN_DOTS 10 // after how many inputs to display a '.' -#define DTRAIN_SCALE 100000 - #include <iomanip> #include <climits> #include <string.h> @@ -25,113 +22,76 @@ namespace po = boost::program_options; namespace dtrain { - -inline void register_and_convert(const vector<string>& strs, vector<WordID>& ids) -{ - vector<string>::const_iterator it; - for (it = strs.begin(); it < strs.end(); it++) - ids.push_back(TD::Convert(*it)); -} - -inline string gettmpf(const string path, const string infix) -{ - char fn[path.size() + infix.size() + 8]; - strcpy(fn, path.c_str()); - strcat(fn, "/"); - strcat(fn, infix.c_str()); - strcat(fn, "-XXXXXX"); - if (!mkstemp(fn)) { - cerr << "Cannot make temp file in" << path << " , exiting." 
<< endl; - exit(1); - } - return string(fn); -} - -typedef double score_t; - struct ScoredHyp { - vector<WordID> w; - SparseVector<double> f; - score_t model; - score_t score; - unsigned rank; + vector<WordID> w; + SparseVector<weight_t> f; + weight_t model, gold; + size_t rank; }; -struct LocalScorer +inline void +RegisterAndConvert(const vector<string>& strs, vector<WordID>& ids) { - unsigned N_; - vector<score_t> w_; - - virtual score_t - Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned rank, const unsigned src_len)=0; - - virtual void Reset() {} // only for ApproxBleuScorer, LinearBleuScorer - - inline void - Init(unsigned N, vector<score_t> weights) - { - assert(N > 0); - N_ = N; - if (weights.empty()) for (unsigned i = 0; i < N_; i++) w_.push_back(1./N_); - else w_ = weights; - } - - inline score_t - brevity_penalty(const unsigned hyp_len, const unsigned ref_len) - { - if (hyp_len > ref_len) return 1; - return exp(1 - (score_t)ref_len/hyp_len); - } -}; - -struct HypSampler : public DecoderObserver -{ - LocalScorer* scorer_; - vector<WordID>* ref_; - unsigned f_count_, sz_; - virtual vector<ScoredHyp>* GetSamples()=0; - inline void SetScorer(LocalScorer* scorer) { scorer_ = scorer; } - inline void SetRef(vector<WordID>& ref) { ref_ = &ref; } - inline unsigned get_f_count() { return f_count_; } - inline unsigned get_sz() { return sz_; } -}; + for (auto s: strs) + ids.push_back(TD::Convert(s)); +} -struct HSReporter +inline void +PrintWordIDVec(vector<WordID>& v, ostream& os=cerr) { - string task_id_; - - HSReporter(string task_id) : task_id_(task_id) {} - - inline void update_counter(string name, unsigned amount) { - cerr << "reporter:counter:" << task_id_ << "," << name << "," << amount << endl; - } - inline void update_gcounter(string name, unsigned amount) { - cerr << "reporter:counter:Global," << name << "," << amount << endl; + for (size_t i = 0; i < v.size(); i++) { + os << TD::Convert(v[i]); + if (i < v.size()-1) os << " "; } -}; +} inline ostream& _np(ostream& out) { return out << resetiosflags(ios::showpos); } inline ostream& _p(ostream& out) { return out << setiosflags(ios::showpos); } inline ostream& _p2(ostream& out) { return out << setprecision(2); } inline ostream& _p5(ostream& out) { return out << setprecision(5); } -inline void printWordIDVec(vector<WordID>& v, ostream& os=cerr) +bool +dtrain_init(int argc, char** argv, po::variables_map* conf) { - for (unsigned i = 0; i < v.size(); i++) { - os << TD::Convert(v[i]); - if (i < v.size()-1) os << " "; + po::options_description ini("Configuration File Options"); + ini.add_options() + ("bitext,b", po::value<string>(), "bitext") + ("decoder_config,C", po::value<string>(), "configuration file for decoder") + ("iterations,T", po::value<size_t>()->default_value(10), "number of iterations T (per shard)") + ("k", po::value<size_t>()->default_value(100), "size of kbest list") + ("learning_rate,l", po::value<weight_t>()->default_value(1.0), "learning rate") + ("l1_reg,r", po::value<weight_t>()->default_value(0.), "l1 regularization strength") + ("error_margin,m", po::value<weight_t>()->default_value(0.), "margin for margin perceptron") + ("N", po::value<size_t>()->default_value(4), "N for BLEU approximation") + ("input_weights,w", po::value<string>(), "input weights file") + ("average,a", po::value<bool>()->default_value(false), "output average weights") + ("keep,K", po::value<bool>()->default_value(false), "output a weight file per iteration") + ("output,o", po::value<string>()->default_value("-"), "output 
weights file, '-' for STDOUT") + ("print_weights,P", po::value<string>()->default_value("EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV"), + "list of weights to print after each iteration"); + po::options_description cl("Command Line Options"); + cl.add_options() + ("config,c", po::value<string>(), "dtrain config file"); + cl.add(ini); + po::store(parse_command_line(argc, argv, cl), *conf); + if (conf->count("config")) { + ifstream f((*conf)["config"].as<string>().c_str()); + po::store(po::parse_config_file(f, ini), *conf); + } + po::notify(*conf); + if (!conf->count("decoder_config")) { + cerr << "Missing decoder configuration." << endl; + return false; + } + if (!conf->count("bitext")) { + cerr << "No training data given." << endl; + return false; } -} -template<typename T> -inline T sign(T z) -{ - if (z == 0) return 0; - return z < 0 ? -1 : +1; + return true; } - } // namespace #endif diff --git a/training/dtrain/examples/parallelized/README b/training/dtrain/examples/parallelized/README index 89715105..c4addd81 100644 --- a/training/dtrain/examples/parallelized/README +++ b/training/dtrain/examples/parallelized/README @@ -1,5 +1,5 @@ run for example - ../../parallelize.rb ./dtrain.ini 4 false 2 2 ./in ./refs + ../../parallelize.rb -c dtrain.ini -s 4 -e 3 -d ../../dtrain -p 2 -i in -final weights will be in the file work/weights.3 +final weights will be in the file work/weights.2 diff --git a/training/dtrain/examples/parallelized/cdec.ini b/training/dtrain/examples/parallelized/cdec.ini index 5773029a..733b1653 100644 --- a/training/dtrain/examples/parallelized/cdec.ini +++ b/training/dtrain/examples/parallelized/cdec.ini @@ -4,7 +4,7 @@ intersection_strategy=cube_pruning cubepruning_pop_limit=200 scfg_max_span_limit=15 feature_function=WordPenalty -feature_function=KLanguageModel ../standard//nc-wmt11.en.srilm.gz +feature_function=KLanguageModel ../standard/nc-wmt11.en.srilm.gz #feature_function=ArityPenalty #feature_function=CMR2008ReorderingFeatures #feature_function=Dwarf diff --git a/training/dtrain/examples/parallelized/dtrain.ini b/training/dtrain/examples/parallelized/dtrain.ini index 0b0932d6..9fc205a3 100644 --- a/training/dtrain/examples/parallelized/dtrain.ini +++ b/training/dtrain/examples/parallelized/dtrain.ini @@ -1,14 +1,7 @@ k=100 N=4 learning_rate=0.0001 -gamma=0 -loss_margin=1.0 -epochs=1 -scorer=stupid_bleu -sample_from=kbest -filter=uniq -pair_sampling=XYX -hi_lo=0.1 -select_weights=last -print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough +error_margin=1.0 +iterations=1 decoder_config=cdec.ini +print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough diff --git a/training/dtrain/examples/parallelized/in b/training/dtrain/examples/parallelized/in index 51d01fe7..82555908 100644 --- a/training/dtrain/examples/parallelized/in +++ b/training/dtrain/examples/parallelized/in @@ -1,10 +1,10 @@ -<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg> -<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg> -<seg grammar="grammar/grammar.out.2.gz" 
id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg> -<seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg> -<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg> -<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg> -<seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg> -<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg> -<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg> -<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg> +<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg> ||| europe 's divided racial house +<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg> ||| a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge . +<seg grammar="grammar/grammar.out.2.gz" id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg> ||| the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them . 
+<seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg> ||| while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon . +<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg> ||| an aging population at home and ever more open borders imply increasing racial fragmentation in european countries . +<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg> ||| mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear . +<seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg> ||| it will not , as america 's racial history clearly shows . +<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg> ||| race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes . +<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg> ||| the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths . +<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg> ||| this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us . diff --git a/training/dtrain/examples/parallelized/refs b/training/dtrain/examples/parallelized/refs deleted file mode 100644 index 632e27b0..00000000 --- a/training/dtrain/examples/parallelized/refs +++ /dev/null @@ -1,10 +0,0 @@ -europe 's divided racial house -a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge . -the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them . 
-while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon . -an aging population at home and ever more open borders imply increasing racial fragmentation in european countries . -mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear . -it will not , as america 's racial history clearly shows . -race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes . -the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths . -this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us . diff --git a/training/dtrain/examples/parallelized/work/out.0.0 b/training/dtrain/examples/parallelized/work/out.0.0 index c559dd4d..77749404 100644 --- a/training/dtrain/examples/parallelized/work/out.0.0 +++ b/training/dtrain/examples/parallelized/work/out.0.0 @@ -1,62 +1,43 @@ - cdec cfg 'cdec.ini' Loading the LM will be faster if you build a binary file. -Reading ../standard//nc-wmt11.en.srilm.gz +Reading ../standard/nc-wmt11.en.srilm.gz ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 **************************************************************************************************** -Seeding random number sequence to 405292278 - dtrain Parameters: k 100 N 4 T 1 - scorer 'stupid_bleu' - sample from 'kbest' - filter 'uniq' learning rate 0.0001 - gamma 0 - loss margin 1 - faster perceptron 0 - pairs 'XYX' - hi lo 0.1 - pair threshold 0 - select weights 'last' - l1 reg 0 'none' - max pairs 4294967295 - cdec cfg 'cdec.ini' + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' input 'work/shard.0.0.in' - refs 'work/shard.0.0.refs' output 'work/weights.0.0' -(a dot represents 10 inputs) +(a dot per input) Iteration #1 of 1. - 5 + .... 3 WEIGHTS - Glue = +0.2663 - WordPenalty = -0.0079042 - LanguageModel = +0.44782 - LanguageModel_OOV = -0.0401 - PhraseModel_0 = -0.193 - PhraseModel_1 = +0.71321 - PhraseModel_2 = +0.85196 - PhraseModel_3 = -0.43986 - PhraseModel_4 = -0.44803 - PhraseModel_5 = -0.0538 - PhraseModel_6 = -0.1788 - PassThrough = -0.1477 + Glue = +0.3404 + WordPenalty = -0.017632 + LanguageModel = +0.72958 + LanguageModel_OOV = -0.235 + PhraseModel_0 = -0.43721 + PhraseModel_1 = +1.01 + PhraseModel_2 = +1.3525 + PhraseModel_3 = -0.25541 + PhraseModel_4 = -0.78115 + PhraseModel_5 = +0 + PhraseModel_6 = -0.3681 + PassThrough = -0.3304 --- - 1best avg score: 0.17521 (+0.17521) - 1best avg model score: 21.556 (+21.556) - avg # pairs: 1671.2 - avg # rank err: 1118.6 - avg # margin viol: 552.6 - non0 feature count: 12 + 1best avg score: 0.19474 (+0.19474) + 1best avg model score: 0.52232 + avg # pairs: 2513 + non-0 feature count: 11 avg list sz: 100 - avg f count: 11.32 -(time 0.35 min, 4.2 s/S) - -Writing weights file to 'work/weights.0.0' ... -done + avg f count: 11.42 +(time 0.32 min, 6 s/S) --- -Best iteration: 1 [SCORE 'stupid_bleu'=0.17521]. -This took 0.35 min. +Best iteration: 1 [GOLD = 0.19474]. +This took 0.31667 min. 
diff --git a/training/dtrain/examples/parallelized/work/out.0.1 b/training/dtrain/examples/parallelized/work/out.0.1 index 8bc7ea9c..d0dee623 100644 --- a/training/dtrain/examples/parallelized/work/out.0.1 +++ b/training/dtrain/examples/parallelized/work/out.0.1 @@ -1,63 +1,44 @@ - cdec cfg 'cdec.ini' Loading the LM will be faster if you build a binary file. -Reading ../standard//nc-wmt11.en.srilm.gz +Reading ../standard/nc-wmt11.en.srilm.gz ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 **************************************************************************************************** -Seeding random number sequence to 43859692 - dtrain Parameters: k 100 N 4 T 1 - scorer 'stupid_bleu' - sample from 'kbest' - filter 'uniq' learning rate 0.0001 - gamma 0 - loss margin 1 - faster perceptron 0 - pairs 'XYX' - hi lo 0.1 - pair threshold 0 - select weights 'last' - l1 reg 0 'none' - max pairs 4294967295 - cdec cfg 'cdec.ini' + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' input 'work/shard.0.0.in' - refs 'work/shard.0.0.refs' output 'work/weights.0.1' weights in 'work/weights.0' -(a dot represents 10 inputs) +(a dot per input) Iteration #1 of 1. - 5 + .... 3 WEIGHTS - Glue = -0.2699 - WordPenalty = +0.080605 - LanguageModel = -0.026572 - LanguageModel_OOV = -0.30025 - PhraseModel_0 = -0.32076 - PhraseModel_1 = +0.67451 - PhraseModel_2 = +0.92 - PhraseModel_3 = -0.36402 - PhraseModel_4 = -0.592 - PhraseModel_5 = -0.0269 - PhraseModel_6 = -0.28755 - PassThrough = -0.33285 + Glue = -0.40908 + WordPenalty = +0.12967 + LanguageModel = +0.39892 + LanguageModel_OOV = -0.6314 + PhraseModel_0 = -0.63992 + PhraseModel_1 = +0.74198 + PhraseModel_2 = +1.3096 + PhraseModel_3 = -0.1216 + PhraseModel_4 = -1.2274 + PhraseModel_5 = +0.02435 + PhraseModel_6 = -0.21093 + PassThrough = -0.66155 --- - 1best avg score: 0.26638 (+0.26638) - 1best avg model score: 53.197 (+53.197) - avg # pairs: 2028.6 - avg # rank err: 998.2 - avg # margin viol: 918.8 - non0 feature count: 12 + 1best avg score: 0.15735 (+0.15735) + 1best avg model score: 46.831 + avg # pairs: 2132.3 + non-0 feature count: 12 avg list sz: 100 - avg f count: 10.496 -(time 0.35 min, 4.2 s/S) - -Writing weights file to 'work/weights.0.1' ... -done + avg f count: 10.64 +(time 0.38 min, 7 s/S) --- -Best iteration: 1 [SCORE 'stupid_bleu'=0.26638]. -This took 0.35 min. +Best iteration: 1 [GOLD = 0.15735]. +This took 0.38333 min. diff --git a/training/dtrain/examples/parallelized/work/out.0.2 b/training/dtrain/examples/parallelized/work/out.0.2 new file mode 100644 index 00000000..9c4b110b --- /dev/null +++ b/training/dtrain/examples/parallelized/work/out.0.2 @@ -0,0 +1,44 @@ +Loading the LM will be faster if you build a binary file. +Reading ../standard/nc-wmt11.en.srilm.gz +----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 +**************************************************************************************************** +dtrain +Parameters: + k 100 + N 4 + T 1 + learning rate 0.0001 + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' + input 'work/shard.0.0.in' + output 'work/weights.0.2' + weights in 'work/weights.1' +(a dot per input) +Iteration #1 of 1. + .... 
3 +WEIGHTS + Glue = -0.44422 + WordPenalty = +0.1032 + LanguageModel = +0.66474 + LanguageModel_OOV = -0.62252 + PhraseModel_0 = -0.59993 + PhraseModel_1 = +0.78992 + PhraseModel_2 = +1.3149 + PhraseModel_3 = +0.21434 + PhraseModel_4 = -1.0174 + PhraseModel_5 = +0.02435 + PhraseModel_6 = -0.18452 + PassThrough = -0.65268 + --- + 1best avg score: 0.24722 (+0.24722) + 1best avg model score: 61.971 + avg # pairs: 2017.7 + non-0 feature count: 12 + avg list sz: 100 + avg f count: 10.42 +(time 0.3 min, 6 s/S) + +--- +Best iteration: 1 [GOLD = 0.24722]. +This took 0.3 min. diff --git a/training/dtrain/examples/parallelized/work/out.1.0 b/training/dtrain/examples/parallelized/work/out.1.0 index 65d1e7dc..3dc4dca6 100644 --- a/training/dtrain/examples/parallelized/work/out.1.0 +++ b/training/dtrain/examples/parallelized/work/out.1.0 @@ -1,62 +1,43 @@ - cdec cfg 'cdec.ini' Loading the LM will be faster if you build a binary file. -Reading ../standard//nc-wmt11.en.srilm.gz +Reading ../standard/nc-wmt11.en.srilm.gz ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 **************************************************************************************************** -Seeding random number sequence to 4126799437 - dtrain Parameters: k 100 N 4 T 1 - scorer 'stupid_bleu' - sample from 'kbest' - filter 'uniq' learning rate 0.0001 - gamma 0 - loss margin 1 - faster perceptron 0 - pairs 'XYX' - hi lo 0.1 - pair threshold 0 - select weights 'last' - l1 reg 0 'none' - max pairs 4294967295 - cdec cfg 'cdec.ini' + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' input 'work/shard.1.0.in' - refs 'work/shard.1.0.refs' output 'work/weights.1.0' -(a dot represents 10 inputs) +(a dot per input) Iteration #1 of 1. - 5 + .... 3 WEIGHTS - Glue = -0.3815 - WordPenalty = +0.20064 - LanguageModel = +0.95304 - LanguageModel_OOV = -0.264 - PhraseModel_0 = -0.22362 - PhraseModel_1 = +0.12254 - PhraseModel_2 = +0.26328 - PhraseModel_3 = +0.38018 - PhraseModel_4 = -0.48654 - PhraseModel_5 = +0 - PhraseModel_6 = -0.3645 - PassThrough = -0.2216 + Glue = -0.2722 + WordPenalty = +0.05433 + LanguageModel = +0.69948 + LanguageModel_OOV = -0.2641 + PhraseModel_0 = -1.4208 + PhraseModel_1 = -1.563 + PhraseModel_2 = -0.21051 + PhraseModel_3 = -0.17764 + PhraseModel_4 = -1.6583 + PhraseModel_5 = +0.0794 + PhraseModel_6 = +0.1528 + PassThrough = -0.2367 --- - 1best avg score: 0.10863 (+0.10863) - 1best avg model score: -4.9841 (-4.9841) - avg # pairs: 1345.4 - avg # rank err: 822.4 - avg # margin viol: 501 - non0 feature count: 11 + 1best avg score: 0.071329 (+0.071329) + 1best avg model score: -41.362 + avg # pairs: 1862.3 + non-0 feature count: 12 avg list sz: 100 - avg f count: 11.814 -(time 0.43 min, 5.2 s/S) - -Writing weights file to 'work/weights.1.0' ... -done + avg f count: 11.847 +(time 0.28 min, 5 s/S) --- -Best iteration: 1 [SCORE 'stupid_bleu'=0.10863]. -This took 0.43333 min. +Best iteration: 1 [GOLD = 0.071329]. +This took 0.28333 min. diff --git a/training/dtrain/examples/parallelized/work/out.1.1 b/training/dtrain/examples/parallelized/work/out.1.1 index f479fbbc..79ac35dc 100644 --- a/training/dtrain/examples/parallelized/work/out.1.1 +++ b/training/dtrain/examples/parallelized/work/out.1.1 @@ -1,63 +1,44 @@ - cdec cfg 'cdec.ini' Loading the LM will be faster if you build a binary file. 
-Reading ../standard//nc-wmt11.en.srilm.gz +Reading ../standard/nc-wmt11.en.srilm.gz ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 **************************************************************************************************** -Seeding random number sequence to 2112412848 - dtrain Parameters: k 100 N 4 T 1 - scorer 'stupid_bleu' - sample from 'kbest' - filter 'uniq' learning rate 0.0001 - gamma 0 - loss margin 1 - faster perceptron 0 - pairs 'XYX' - hi lo 0.1 - pair threshold 0 - select weights 'last' - l1 reg 0 'none' - max pairs 4294967295 - cdec cfg 'cdec.ini' + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' input 'work/shard.1.0.in' - refs 'work/shard.1.0.refs' output 'work/weights.1.1' weights in 'work/weights.0' -(a dot represents 10 inputs) +(a dot per input) Iteration #1 of 1. - 5 + .... 3 WEIGHTS - Glue = -0.3178 - WordPenalty = +0.11092 - LanguageModel = +0.17269 - LanguageModel_OOV = -0.13485 - PhraseModel_0 = -0.45371 - PhraseModel_1 = +0.38789 - PhraseModel_2 = +0.75311 - PhraseModel_3 = -0.38163 - PhraseModel_4 = -0.58817 - PhraseModel_5 = -0.0269 - PhraseModel_6 = -0.27315 - PassThrough = -0.16745 + Glue = -0.20488 + WordPenalty = -0.0091745 + LanguageModel = +0.79433 + LanguageModel_OOV = -0.4309 + PhraseModel_0 = -0.56242 + PhraseModel_1 = +0.85363 + PhraseModel_2 = +1.3458 + PhraseModel_3 = -0.13095 + PhraseModel_4 = -0.94762 + PhraseModel_5 = +0.02435 + PhraseModel_6 = -0.16003 + PassThrough = -0.46105 --- - 1best avg score: 0.13169 (+0.13169) - 1best avg model score: 24.226 (+24.226) - avg # pairs: 1951.2 - avg # rank err: 985.4 - avg # margin viol: 951 - non0 feature count: 12 + 1best avg score: 0.13017 (+0.13017) + 1best avg model score: 14.53 + avg # pairs: 1968 + non-0 feature count: 12 avg list sz: 100 - avg f count: 11.224 -(time 0.45 min, 5.4 s/S) - -Writing weights file to 'work/weights.1.1' ... -done + avg f count: 11 +(time 0.33 min, 6 s/S) --- -Best iteration: 1 [SCORE 'stupid_bleu'=0.13169]. -This took 0.45 min. +Best iteration: 1 [GOLD = 0.13017]. +This took 0.33333 min. diff --git a/training/dtrain/examples/parallelized/work/out.1.2 b/training/dtrain/examples/parallelized/work/out.1.2 new file mode 100644 index 00000000..8c4f8b03 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/out.1.2 @@ -0,0 +1,44 @@ +Loading the LM will be faster if you build a binary file. +Reading ../standard/nc-wmt11.en.srilm.gz +----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 +**************************************************************************************************** +dtrain +Parameters: + k 100 + N 4 + T 1 + learning rate 0.0001 + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' + input 'work/shard.1.0.in' + output 'work/weights.1.2' + weights in 'work/weights.1' +(a dot per input) +Iteration #1 of 1. + .... 3 +WEIGHTS + Glue = -0.49853 + WordPenalty = +0.07636 + LanguageModel = +1.3183 + LanguageModel_OOV = -0.60902 + PhraseModel_0 = -0.22481 + PhraseModel_1 = +0.86369 + PhraseModel_2 = +1.0747 + PhraseModel_3 = +0.18002 + PhraseModel_4 = -0.84661 + PhraseModel_5 = +0.02435 + PhraseModel_6 = +0.11247 + PassThrough = -0.63918 + --- + 1best avg score: 0.15478 (+0.15478) + 1best avg model score: -7.2154 + avg # pairs: 1776 + non-0 feature count: 12 + avg list sz: 100 + avg f count: 11.327 +(time 0.27 min, 5 s/S) + +--- +Best iteration: 1 [GOLD = 0.15478]. +This took 0.26667 min. 
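The "weights in 'work/weights.N'" lines in these logs refer to the averaged weights of the previous epoch. parallelize.rb (further down in this diff) produces them by concatenating the per-shard weight files and reducing them with lplp.rb; for the first epoch of this four-shard example that amounts to roughly:

    cat work/weights.*.0 > work/weights_cat
    ruby lplp.rb l2 select_k 100000 4 < work/weights_cat > work/weights.0

Here "l2 select_k 100000" are the script's default lplp arguments and "4" is the shard count of this example run; treat the exact invocation as a sketch.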
diff --git a/training/dtrain/examples/parallelized/work/out.2.0 b/training/dtrain/examples/parallelized/work/out.2.0 new file mode 100644 index 00000000..07c85963 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/out.2.0 @@ -0,0 +1,43 @@ +Loading the LM will be faster if you build a binary file. +Reading ../standard/nc-wmt11.en.srilm.gz +----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 +**************************************************************************************************** +dtrain +Parameters: + k 100 + N 4 + T 1 + learning rate 0.0001 + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' + input 'work/shard.2.0.in' + output 'work/weights.2.0' +(a dot per input) +Iteration #1 of 1. + .... 3 +WEIGHTS + Glue = -0.2109 + WordPenalty = +0.14922 + LanguageModel = +0.79686 + LanguageModel_OOV = -0.6627 + PhraseModel_0 = +0.37999 + PhraseModel_1 = +0.69213 + PhraseModel_2 = +0.3422 + PhraseModel_3 = +1.1426 + PhraseModel_4 = -0.55413 + PhraseModel_5 = +0 + PhraseModel_6 = +0.0676 + PassThrough = -0.6343 + --- + 1best avg score: 0.072374 (+0.072374) + 1best avg model score: -27.384 + avg # pairs: 2582 + non-0 feature count: 11 + avg list sz: 100 + avg f count: 11.54 +(time 0.32 min, 6 s/S) + +--- +Best iteration: 1 [GOLD = 0.072374]. +This took 0.31667 min. diff --git a/training/dtrain/examples/parallelized/work/out.2.1 b/training/dtrain/examples/parallelized/work/out.2.1 new file mode 100644 index 00000000..c54bb1b1 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/out.2.1 @@ -0,0 +1,44 @@ +Loading the LM will be faster if you build a binary file. +Reading ../standard/nc-wmt11.en.srilm.gz +----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 +**************************************************************************************************** +dtrain +Parameters: + k 100 + N 4 + T 1 + learning rate 0.0001 + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' + input 'work/shard.2.0.in' + output 'work/weights.2.1' + weights in 'work/weights.0' +(a dot per input) +Iteration #1 of 1. + .... 3 +WEIGHTS + Glue = -0.76608 + WordPenalty = +0.15938 + LanguageModel = +1.5897 + LanguageModel_OOV = -0.521 + PhraseModel_0 = -0.58348 + PhraseModel_1 = +0.29828 + PhraseModel_2 = +0.78493 + PhraseModel_3 = +0.083222 + PhraseModel_4 = -0.93843 + PhraseModel_5 = +0.02435 + PhraseModel_6 = -0.27382 + PassThrough = -0.55115 + --- + 1best avg score: 0.12881 (+0.12881) + 1best avg model score: -9.6731 + avg # pairs: 2020.3 + non-0 feature count: 12 + avg list sz: 100 + avg f count: 12 +(time 0.32 min, 6 s/S) + +--- +Best iteration: 1 [GOLD = 0.12881]. +This took 0.31667 min. diff --git a/training/dtrain/examples/parallelized/work/out.2.2 b/training/dtrain/examples/parallelized/work/out.2.2 new file mode 100644 index 00000000..f5d6229f --- /dev/null +++ b/training/dtrain/examples/parallelized/work/out.2.2 @@ -0,0 +1,44 @@ +Loading the LM will be faster if you build a binary file. +Reading ../standard/nc-wmt11.en.srilm.gz +----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 +**************************************************************************************************** +dtrain +Parameters: + k 100 + N 4 + T 1 + learning rate 0.0001 + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' + input 'work/shard.2.0.in' + output 'work/weights.2.2' + weights in 'work/weights.1' +(a dot per input) +Iteration #1 of 1. + .... 
3 +WEIGHTS + Glue = -0.90863 + WordPenalty = +0.10819 + LanguageModel = +0.5239 + LanguageModel_OOV = -0.41623 + PhraseModel_0 = -0.86868 + PhraseModel_1 = +0.40784 + PhraseModel_2 = +1.1793 + PhraseModel_3 = -0.24698 + PhraseModel_4 = -1.2353 + PhraseModel_5 = +0.03375 + PhraseModel_6 = -0.17883 + PassThrough = -0.44638 + --- + 1best avg score: 0.12788 (+0.12788) + 1best avg model score: 41.302 + avg # pairs: 2246.3 + non-0 feature count: 12 + avg list sz: 100 + avg f count: 10.98 +(time 0.35 min, 7 s/S) + +--- +Best iteration: 1 [GOLD = 0.12788]. +This took 0.35 min. diff --git a/training/dtrain/examples/parallelized/work/out.3.0 b/training/dtrain/examples/parallelized/work/out.3.0 new file mode 100644 index 00000000..fa499523 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/out.3.0 @@ -0,0 +1,43 @@ +Loading the LM will be faster if you build a binary file. +Reading ../standard/nc-wmt11.en.srilm.gz +----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 +**************************************************************************************************** +dtrain +Parameters: + k 100 + N 4 + T 1 + learning rate 0.0001 + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' + input 'work/shard.3.0.in' + output 'work/weights.3.0' +(a dot per input) +Iteration #1 of 1. + .. 1 +WEIGHTS + Glue = -0.09 + WordPenalty = +0.32442 + LanguageModel = +2.5769 + LanguageModel_OOV = -0.009 + PhraseModel_0 = -0.58972 + PhraseModel_1 = +0.063691 + PhraseModel_2 = +0.5366 + PhraseModel_3 = +0.12867 + PhraseModel_4 = -1.9801 + PhraseModel_5 = +0.018 + PhraseModel_6 = -0.486 + PassThrough = -0.09 + --- + 1best avg score: 0.034204 (+0.034204) + 1best avg model score: 0 + avg # pairs: 1700 + non-0 feature count: 12 + avg list sz: 100 + avg f count: 10.8 +(time 0.1 min, 6 s/S) + +--- +Best iteration: 1 [GOLD = 0.034204]. +This took 0.1 min. diff --git a/training/dtrain/examples/parallelized/work/out.3.1 b/training/dtrain/examples/parallelized/work/out.3.1 new file mode 100644 index 00000000..c4b3aa3c --- /dev/null +++ b/training/dtrain/examples/parallelized/work/out.3.1 @@ -0,0 +1,44 @@ +Loading the LM will be faster if you build a binary file. +Reading ../standard/nc-wmt11.en.srilm.gz +----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 +**************************************************************************************************** +dtrain +Parameters: + k 100 + N 4 + T 1 + learning rate 0.0001 + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' + input 'work/shard.3.0.in' + output 'work/weights.3.1' + weights in 'work/weights.0' +(a dot per input) +Iteration #1 of 1. + .. 1 +WEIGHTS + Glue = +0.31832 + WordPenalty = +0.11139 + LanguageModel = +0.95438 + LanguageModel_OOV = -0.0608 + PhraseModel_0 = -0.98113 + PhraseModel_1 = -0.090531 + PhraseModel_2 = +0.79088 + PhraseModel_3 = -0.57623 + PhraseModel_4 = -1.4382 + PhraseModel_5 = +0.02435 + PhraseModel_6 = -0.10812 + PassThrough = -0.09095 + --- + 1best avg score: 0.084989 (+0.084989) + 1best avg model score: -52.323 + avg # pairs: 2487 + non-0 feature count: 12 + avg list sz: 100 + avg f count: 12 +(time 0.1 min, 6 s/S) + +--- +Best iteration: 1 [GOLD = 0.084989]. +This took 0.1 min. 
diff --git a/training/dtrain/examples/parallelized/work/out.3.2 b/training/dtrain/examples/parallelized/work/out.3.2 new file mode 100644 index 00000000..eb27dac2 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/out.3.2 @@ -0,0 +1,44 @@ +Loading the LM will be faster if you build a binary file. +Reading ../standard/nc-wmt11.en.srilm.gz +----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 +**************************************************************************************************** +dtrain +Parameters: + k 100 + N 4 + T 1 + learning rate 0.0001 + error margin 1 + l1 reg 0 + decoder conf 'cdec.ini' + input 'work/shard.3.0.in' + output 'work/weights.3.2' + weights in 'work/weights.1' +(a dot per input) +Iteration #1 of 1. + .. 1 +WEIGHTS + Glue = -0.12993 + WordPenalty = +0.13651 + LanguageModel = +0.58946 + LanguageModel_OOV = -0.48362 + PhraseModel_0 = -0.81262 + PhraseModel_1 = +0.44273 + PhraseModel_2 = +1.1733 + PhraseModel_3 = -0.1826 + PhraseModel_4 = -1.2213 + PhraseModel_5 = +0.02435 + PhraseModel_6 = -0.18823 + PassThrough = -0.51378 + --- + 1best avg score: 0.12674 (+0.12674) + 1best avg model score: -7.2878 + avg # pairs: 1769 + non-0 feature count: 12 + avg list sz: 100 + avg f count: 12 +(time 0.1 min, 6 s/S) + +--- +Best iteration: 1 [GOLD = 0.12674]. +This took 0.1 min. diff --git a/training/dtrain/examples/parallelized/work/shard.0.0.in b/training/dtrain/examples/parallelized/work/shard.0.0.in index 92f9c78e..a0ef6f54 100644 --- a/training/dtrain/examples/parallelized/work/shard.0.0.in +++ b/training/dtrain/examples/parallelized/work/shard.0.0.in @@ -1,5 +1,3 @@ -<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg> -<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg> -<seg grammar="grammar/grammar.out.2.gz" id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg> -<seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg> -<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg> +<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg> ||| europe 's divided racial house +<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg> ||| a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge . 
+<seg grammar="grammar/grammar.out.2.gz" id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg> ||| the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them . diff --git a/training/dtrain/examples/parallelized/work/shard.0.0.refs b/training/dtrain/examples/parallelized/work/shard.0.0.refs deleted file mode 100644 index bef68fee..00000000 --- a/training/dtrain/examples/parallelized/work/shard.0.0.refs +++ /dev/null @@ -1,5 +0,0 @@ -europe 's divided racial house -a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge . -the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them . -while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon . -an aging population at home and ever more open borders imply increasing racial fragmentation in european countries . diff --git a/training/dtrain/examples/parallelized/work/shard.1.0.in b/training/dtrain/examples/parallelized/work/shard.1.0.in index b7695ce7..05f0273b 100644 --- a/training/dtrain/examples/parallelized/work/shard.1.0.in +++ b/training/dtrain/examples/parallelized/work/shard.1.0.in @@ -1,5 +1,3 @@ -<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg> -<seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg> -<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg> -<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg> -<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . 
diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg> +<seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg> ||| while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon . +<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg> ||| an aging population at home and ever more open borders imply increasing racial fragmentation in european countries . +<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg> ||| mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear . diff --git a/training/dtrain/examples/parallelized/work/shard.1.0.refs b/training/dtrain/examples/parallelized/work/shard.1.0.refs deleted file mode 100644 index 6076f6d5..00000000 --- a/training/dtrain/examples/parallelized/work/shard.1.0.refs +++ /dev/null @@ -1,5 +0,0 @@ -mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear . -it will not , as america 's racial history clearly shows . -race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes . -the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths . -this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us . diff --git a/training/dtrain/examples/parallelized/work/shard.2.0.in b/training/dtrain/examples/parallelized/work/shard.2.0.in new file mode 100644 index 00000000..0528d357 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/shard.2.0.in @@ -0,0 +1,3 @@ +<seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg> ||| it will not , as america 's racial history clearly shows . +<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg> ||| race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes . 
+<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg> ||| the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths . diff --git a/training/dtrain/examples/parallelized/work/shard.3.0.in b/training/dtrain/examples/parallelized/work/shard.3.0.in new file mode 100644 index 00000000..f7cbb3e3 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/shard.3.0.in @@ -0,0 +1 @@ +<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg> ||| this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us . diff --git a/training/dtrain/examples/parallelized/work/weights.0 b/training/dtrain/examples/parallelized/work/weights.0 index ddd595a8..816269cd 100644 --- a/training/dtrain/examples/parallelized/work/weights.0 +++ b/training/dtrain/examples/parallelized/work/weights.0 @@ -1,12 +1,12 @@ -LanguageModel 0.7004298992212881 -PhraseModel_2 0.5576194336478857 -PhraseModel_1 0.41787318415343155 -PhraseModel_4 -0.46728502545635164 -PhraseModel_3 -0.029839521598455515 -Glue -0.05760000000000068 -PhraseModel_6 -0.2716499999999978 -PhraseModel_0 -0.20831031065605327 -LanguageModel_OOV -0.15205000000000077 -PassThrough -0.1846500000000006 -WordPenalty 0.09636994553433414 -PhraseModel_5 -0.026900000000000257 +LanguageModel 1.200704259340465 +PhraseModel_4 -1.2434381298299035 +PhraseModel_1 0.050697726409824076 +PhraseModel_0 -0.516923312932941 +PhraseModel_2 0.5051987092783867 +PhraseModel_3 0.20955092377784057 +PassThrough -0.32285 +LanguageModel_OOV -0.29269999999999996 +PhraseModel_6 -0.158425 +Glue -0.05817500000000002 +WordPenalty 0.12758486142112804 +PhraseModel_5 0.02435 diff --git a/training/dtrain/examples/parallelized/work/weights.0.0 b/training/dtrain/examples/parallelized/work/weights.0.0 index c9370b18..be386c62 100644 --- a/training/dtrain/examples/parallelized/work/weights.0.0 +++ b/training/dtrain/examples/parallelized/work/weights.0.0 @@ -1,12 +1,11 @@ -WordPenalty -0.0079041595706392243 -LanguageModel 0.44781580828279532 -LanguageModel_OOV -0.04010000000000042 -Glue 0.26629999999999948 -PhraseModel_0 -0.19299677809125185 -PhraseModel_1 0.71321026861732773 -PhraseModel_2 0.85195540993310537 -PhraseModel_3 -0.43986310822842656 -PhraseModel_4 -0.44802855630415955 -PhraseModel_5 -0.053800000000000514 -PhraseModel_6 -0.17879999999999835 -PassThrough -0.14770000000000036 +WordPenalty -0.017632355965271129 +LanguageModel 0.72957628464102753 +LanguageModel_OOV -0.23499999999999999 +PhraseModel_0 -0.43720953659541578 +PhraseModel_1 1.0100170838129212 +PhraseModel_2 1.3524984123857073 +PhraseModel_3 -0.25541132249775761 +PhraseModel_4 -0.78115161368856911 +PhraseModel_6 -0.36810000000000004 +Glue 0.34040000000000004 +PassThrough -0.33040000000000003 diff --git a/training/dtrain/examples/parallelized/work/weights.0.1 b/training/dtrain/examples/parallelized/work/weights.0.1 index 8fad3de8..d4c77d07 100644 --- a/training/dtrain/examples/parallelized/work/weights.0.1 +++ 
b/training/dtrain/examples/parallelized/work/weights.0.1 @@ -1,12 +1,12 @@ -WordPenalty 0.080605055841244472 -LanguageModel -0.026571720531022844 -LanguageModel_OOV -0.30024999999999141 -Glue -0.26989999999999842 -PhraseModel_2 0.92000295209089566 -PhraseModel_1 0.67450748692470841 -PhraseModel_4 -0.5920000014976784 -PhraseModel_3 -0.36402437203127397 -PhraseModel_6 -0.28754999999999603 -PhraseModel_0 -0.32076244202907672 -PassThrough -0.33284999999999004 -PhraseModel_5 -0.026900000000000257 +WordPenalty 0.12966947493426365 +LanguageModel 0.3989224621154368 +LanguageModel_OOV -0.63139999999999996 +PhraseModel_0 -0.63991953012355962 +PhraseModel_1 0.74197897612368646 +PhraseModel_2 1.3096163833051435 +PhraseModel_3 -0.12160001974680773 +PhraseModel_4 -1.2274031286515816 +PhraseModel_5 0.02435 +PhraseModel_6 -0.210925 +Glue -0.40907500000000002 +PassThrough -0.66155000000000008 diff --git a/training/dtrain/examples/parallelized/work/weights.0.2 b/training/dtrain/examples/parallelized/work/weights.0.2 new file mode 100644 index 00000000..8ce1449b --- /dev/null +++ b/training/dtrain/examples/parallelized/work/weights.0.2 @@ -0,0 +1,12 @@ +WordPenalty 0.10319922626226019 +LanguageModel 0.6647396869692952 +LanguageModel_OOV -0.622525 +PhraseModel_0 -0.59993441316076157 +PhraseModel_1 0.78991513935858193 +PhraseModel_2 1.3148638774685031 +PhraseModel_3 0.2143393571820455 +PhraseModel_4 -1.0173894637028262 +PhraseModel_5 0.02435 +PhraseModel_6 -0.18452499999999999 +Glue -0.44422499999999998 +PassThrough -0.65267500000000012 diff --git a/training/dtrain/examples/parallelized/work/weights.1 b/training/dtrain/examples/parallelized/work/weights.1 index 03058a16..2a00be2e 100644 --- a/training/dtrain/examples/parallelized/work/weights.1 +++ b/training/dtrain/examples/parallelized/work/weights.1 @@ -1,12 +1,12 @@ -PhraseModel_2 0.8365578543552836 -PhraseModel_4 -0.5900840266009169 -PhraseModel_1 0.5312000609786991 -PhraseModel_0 -0.3872342271319619 -PhraseModel_3 -0.3728279676912084 -Glue -0.2938500000000036 -PhraseModel_6 -0.2803499999999967 -PassThrough -0.25014999999999626 -LanguageModel_OOV -0.21754999999999702 -LanguageModel 0.07306061161169894 -WordPenalty 0.09576193325966899 -PhraseModel_5 -0.026900000000000257 +PhraseModel_4 -1.1379250444170055 +PhraseModel_2 1.0578050661336098 +LanguageModel 0.9343385461706668 +PhraseModel_0 -0.6917392152965985 +PhraseModel_1 0.4508371141128957 +PassThrough -0.4411750000000001 +Glue -0.265425 +LanguageModel_OOV -0.411025 +PhraseModel_3 -0.186390082624459 +PhraseModel_6 -0.188225 +WordPenalty 0.09781397468665984 +PhraseModel_5 0.02435 diff --git a/training/dtrain/examples/parallelized/work/weights.1.0 b/training/dtrain/examples/parallelized/work/weights.1.0 index 6a6a65c1..cdcf959e 100644 --- a/training/dtrain/examples/parallelized/work/weights.1.0 +++ b/training/dtrain/examples/parallelized/work/weights.1.0 @@ -1,11 +1,12 @@ -WordPenalty 0.20064405063930751 -LanguageModel 0.9530439901597807 -LanguageModel_OOV -0.26400000000000112 -Glue -0.38150000000000084 -PhraseModel_0 -0.22362384322085468 -PhraseModel_1 0.12253609968953538 -PhraseModel_2 0.26328345736266612 -PhraseModel_3 0.38018406503151553 -PhraseModel_4 -0.48654149460854373 -PhraseModel_6 -0.36449999999999722 -PassThrough -0.22160000000000085 +WordPenalty 0.05433023968609621 +LanguageModel 0.69947965605855011 +LanguageModel_OOV -0.2641 +PhraseModel_0 -1.4207505705360111 +PhraseModel_1 -1.563047680441811 +PhraseModel_2 -0.21050528366541305 +PhraseModel_3 -0.17764037275860439 +PhraseModel_4 
-1.6583462458159566 +PhraseModel_5 0.079399999999999998 +PhraseModel_6 0.15280000000000002 +Glue -0.27220000000000005 +PassThrough -0.23670000000000002 diff --git a/training/dtrain/examples/parallelized/work/weights.1.1 b/training/dtrain/examples/parallelized/work/weights.1.1 index f56ea4a2..c1bb2cf0 100644 --- a/training/dtrain/examples/parallelized/work/weights.1.1 +++ b/training/dtrain/examples/parallelized/work/weights.1.1 @@ -1,12 +1,12 @@ -WordPenalty 0.1109188106780935 -LanguageModel 0.17269294375442074 -LanguageModel_OOV -0.13485000000000266 -Glue -0.3178000000000088 -PhraseModel_2 0.75311275661967159 -PhraseModel_1 0.38789263503268989 -PhraseModel_4 -0.58816805170415531 -PhraseModel_3 -0.38163156335114284 -PhraseModel_6 -0.27314999999999739 -PhraseModel_0 -0.45370601223484697 -PassThrough -0.16745000000000249 -PhraseModel_5 -0.026900000000000257 +WordPenalty -0.0091744709302067785 +LanguageModel 0.79433413663506514 +LanguageModel_OOV -0.43090000000000001 +PhraseModel_0 -0.56242499947237046 +PhraseModel_1 0.85362516703032698 +PhraseModel_2 1.3457900890481096 +PhraseModel_3 -0.13095079554478939 +PhraseModel_4 -0.94761908497413061 +PhraseModel_5 0.02435 +PhraseModel_6 -0.160025 +Glue -0.20487500000000003 +PassThrough -0.46105000000000007 diff --git a/training/dtrain/examples/parallelized/work/weights.1.2 b/training/dtrain/examples/parallelized/work/weights.1.2 new file mode 100644 index 00000000..c9598a04 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/weights.1.2 @@ -0,0 +1,12 @@ +WordPenalty 0.076359827280638559 +LanguageModel 1.3183380272921175 +LanguageModel_OOV -0.60902499999999993 +PhraseModel_0 -0.2248075206657828 +PhraseModel_1 0.86368802571834491 +PhraseModel_2 1.0746702462261808 +PhraseModel_3 0.18002263643876637 +PhraseModel_4 -0.84660750337519441 +PhraseModel_5 0.02435 +PhraseModel_6 0.11247499999999999 +Glue -0.49852500000000005 +PassThrough -0.63917500000000005 diff --git a/training/dtrain/examples/parallelized/work/weights.2 b/training/dtrain/examples/parallelized/work/weights.2 new file mode 100644 index 00000000..310973ec --- /dev/null +++ b/training/dtrain/examples/parallelized/work/weights.2 @@ -0,0 +1,12 @@ +PhraseModel_2 1.185520780812669 +PhraseModel_4 -1.0801541070647134 +LanguageModel 0.7741099486587568 +PhraseModel_0 -0.6265095873268189 +PhraseModel_1 0.6260421233840029 +PassThrough -0.5630000000000002 +Glue -0.495325 +LanguageModel_OOV -0.53285 +PhraseModel_3 -0.008805626854390465 +PhraseModel_6 -0.10977500000000001 +WordPenalty 0.1060655698428214 +PhraseModel_5 0.026699999999999998 diff --git a/training/dtrain/examples/parallelized/work/weights.2.0 b/training/dtrain/examples/parallelized/work/weights.2.0 new file mode 100644 index 00000000..3e87fed4 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/weights.2.0 @@ -0,0 +1,11 @@ +WordPenalty 0.14922358398195767 +LanguageModel 0.79685677298009394 +LanguageModel_OOV -0.66270000000000007 +PhraseModel_0 0.37998874905310187 +PhraseModel_1 0.69213063228111271 +PhraseModel_2 0.34219807728516061 +PhraseModel_3 1.1425846772648622 +PhraseModel_4 -0.55412548521619742 +PhraseModel_6 0.067599999999999993 +Glue -0.21090000000000003 +PassThrough -0.63429999999999997 diff --git a/training/dtrain/examples/parallelized/work/weights.2.1 b/training/dtrain/examples/parallelized/work/weights.2.1 new file mode 100644 index 00000000..d129dc49 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/weights.2.1 @@ -0,0 +1,12 @@ +WordPenalty 0.1593752174964457 +LanguageModel 1.5897162231676281 
+LanguageModel_OOV -0.52100000000000002 +PhraseModel_0 -0.5834836741748588 +PhraseModel_1 0.29827543837280185 +PhraseModel_2 0.78493316593562568 +PhraseModel_3 0.083221832554333464 +PhraseModel_4 -0.93843312963279457 +PhraseModel_5 0.02435 +PhraseModel_6 -0.27382499999999999 +Glue -0.76607500000000006 +PassThrough -0.55115000000000003 diff --git a/training/dtrain/examples/parallelized/work/weights.2.2 b/training/dtrain/examples/parallelized/work/weights.2.2 new file mode 100644 index 00000000..bcc83b44 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/weights.2.2 @@ -0,0 +1,12 @@ +WordPenalty 0.10819361280414735 +LanguageModel 0.52389743342585859 +LanguageModel_OOV -0.41622500000000001 +PhraseModel_0 -0.86867995703334211 +PhraseModel_1 0.40783818771767943 +PhraseModel_2 1.1792706530114188 +PhraseModel_3 -0.2469805689928464 +PhraseModel_4 -1.2352895858909159 +PhraseModel_5 0.033750000000000002 +PhraseModel_6 -0.17882500000000001 +Glue -0.90862500000000002 +PassThrough -0.44637500000000013 diff --git a/training/dtrain/examples/parallelized/work/weights.3.0 b/training/dtrain/examples/parallelized/work/weights.3.0 new file mode 100644 index 00000000..e3586048 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/weights.3.0 @@ -0,0 +1,12 @@ +WordPenalty 0.32441797798172944 +LanguageModel 2.5769043236821889 +LanguageModel_OOV -0.0090000000000000011 +PhraseModel_0 -0.58972189365343919 +PhraseModel_1 0.063690869987073351 +PhraseModel_2 0.53660363110809217 +PhraseModel_3 0.12867071310286207 +PhraseModel_4 -1.9801291745988916 +PhraseModel_5 0.018000000000000002 +PhraseModel_6 -0.48600000000000004 +Glue -0.090000000000000011 +PassThrough -0.090000000000000011 diff --git a/training/dtrain/examples/parallelized/work/weights.3.1 b/training/dtrain/examples/parallelized/work/weights.3.1 new file mode 100644 index 00000000..b27687d3 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/weights.3.1 @@ -0,0 +1,12 @@ +WordPenalty 0.11138567724613679 +LanguageModel 0.95438136276453733 +LanguageModel_OOV -0.060799999999999937 +PhraseModel_0 -0.98112865741560529 +PhraseModel_1 -0.090531125075232435 +PhraseModel_2 0.79088062624556033 +PhraseModel_3 -0.57623134776057228 +PhraseModel_4 -1.4382448344095151 +PhraseModel_5 0.02435 +PhraseModel_6 -0.108125 +Glue 0.31832499999999997 +PassThrough -0.090950000000000003 diff --git a/training/dtrain/examples/parallelized/work/weights.3.2 b/training/dtrain/examples/parallelized/work/weights.3.2 new file mode 100644 index 00000000..ccb591a2 --- /dev/null +++ b/training/dtrain/examples/parallelized/work/weights.3.2 @@ -0,0 +1,12 @@ +WordPenalty 0.13650961302423945 +LanguageModel 0.58946464694775647 +LanguageModel_OOV -0.48362499999999997 +PhraseModel_0 -0.81261645844738917 +PhraseModel_1 0.44272714074140529 +PhraseModel_2 1.1732783465445731 +PhraseModel_3 -0.18260393204552733 +PhraseModel_4 -1.2213298752899167 +PhraseModel_5 0.02435 +PhraseModel_6 -0.188225 +Glue -0.12992500000000001 +PassThrough -0.51377500000000009 diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini index a515db02..f2698007 100644 --- a/training/dtrain/examples/standard/dtrain.ini +++ b/training/dtrain/examples/standard/dtrain.ini @@ -1,27 +1,10 @@ -#input=./nc-wmt11.de.gz -#refs=./nc-wmt11.en.gz -bitext=./nc-wmt11.gz +bitext=./nc-wmt11.gz # input bitext output=- # a weights file (add .gz for gzip compression) or STDOUT '-' -select_weights=avg # output average (over epochs) weight vector decoder_config=./cdec.ini # config 
for cdec -# weights for these features will be printed on each iteration +iterations=3 # run over input 3 times +k=100 # use 100best lists +N=4 # optimize (approx.) BLEU4 +learning_rate=0.1 # learning rate +error_margin=1.0 # margin for margin perceptron print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough -# newer version of the grammar extractor use different feature names: -#print_weights= EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV -stop_after=10 # stop epoch after 10 inputs -# interesting stuff -epochs=3 # run over input 3 times -k=100 # use 100best lists -N=4 # optimize (approx) BLEU4 -scorer=fixed_stupid_bleu # use 'stupid' BLEU+1 -learning_rate=0.1 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron) -gamma=0 # use SVM reg -sample_from=kbest # use kbest lists (as opposed to forest) -filter=uniq # only unique entries in kbest (surface form) -pair_sampling=XYX # -hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here -pair_threshold=0 # minimum distance in BLEU (here: > 0) -loss_margin=0 # update if correctly ranked, but within this margin -repeat=1 # repeat training on a kbest list 1 times -#batch=true # batch tuning, update after accumulating over all sentences and all kbest lists diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output deleted file mode 100644 index 2460cfbb..00000000 --- a/training/dtrain/examples/standard/expected-output +++ /dev/null @@ -1,123 +0,0 @@ - cdec cfg './cdec.ini' -Loading the LM will be faster if you build a binary file. -Reading ./nc-wmt11.en.srilm.gz -----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100 -**************************************************************************************************** - Example feature: Shape_S00000_T00000 -T=1 I=1 D=1 -Seeding random number sequence to 2327685089 - -dtrain -Parameters: - k 100 - N 4 - T 3 - batch 0 - scorer 'fixed_stupid_bleu' - sample from 'kbest' - filter 'uniq' - learning rate 0.1 - gamma 0 - loss margin 0 - faster perceptron 1 - pairs 'XYX' - hi lo 0.1 - pair threshold 0 - select weights 'avg' - l1 reg 0 'none' - pclr no - max pairs 4294967295 - repeat 1 - cdec cfg './cdec.ini' - input './nc-wmt11.gz' - output '-' - stop_after 10 -(a dot represents 10 inputs) -Iteration #1 of 3. - . 10 -Stopping after 10 input sentences. -WEIGHTS - Glue = +6.9 - WordPenalty = -46.426 - LanguageModel = +535.12 - LanguageModel_OOV = -123.5 - PhraseModel_0 = -160.73 - PhraseModel_1 = -350.13 - PhraseModel_2 = -187.81 - PhraseModel_3 = +172.04 - PhraseModel_4 = +0.90108 - PhraseModel_5 = +21.6 - PhraseModel_6 = +67.2 - PassThrough = -149.7 - --- - 1best avg score: 0.23327 (+0.23327) - 1best avg model score: -9084.9 (-9084.9) - avg # pairs: 780.7 - avg # rank err: 0 (meaningless) - avg # margin viol: 0 - k-best loss imp: 100% - non0 feature count: 1389 - avg list sz: 91.3 - avg f count: 146.2 -(time 0.37 min, 2.2 s/S) - -Iteration #2 of 3. - . 
10 -WEIGHTS - Glue = -43 - WordPenalty = -22.019 - LanguageModel = +591.53 - LanguageModel_OOV = -252.1 - PhraseModel_0 = -120.21 - PhraseModel_1 = -43.589 - PhraseModel_2 = +73.53 - PhraseModel_3 = +113.7 - PhraseModel_4 = -223.81 - PhraseModel_5 = +64 - PhraseModel_6 = +54.8 - PassThrough = -331.1 - --- - 1best avg score: 0.29568 (+0.062413) - 1best avg model score: -15879 (-6794.1) - avg # pairs: 566.1 - avg # rank err: 0 (meaningless) - avg # margin viol: 0 - k-best loss imp: 100% - non0 feature count: 1931 - avg list sz: 91.3 - avg f count: 139.89 -(time 0.33 min, 2 s/S) - -Iteration #3 of 3. - . 10 -WEIGHTS - Glue = -44.3 - WordPenalty = -131.85 - LanguageModel = +230.91 - LanguageModel_OOV = -285.4 - PhraseModel_0 = -194.27 - PhraseModel_1 = -294.83 - PhraseModel_2 = -92.043 - PhraseModel_3 = -140.24 - PhraseModel_4 = +85.613 - PhraseModel_5 = +238.1 - PhraseModel_6 = +158.7 - PassThrough = -359.6 - --- - 1best avg score: 0.37375 (+0.078067) - 1best avg model score: -14519 (+1359.7) - avg # pairs: 545.4 - avg # rank err: 0 (meaningless) - avg # margin viol: 0 - k-best loss imp: 100% - non0 feature count: 2218 - avg list sz: 91.3 - avg f count: 137.77 -(time 0.35 min, 2.1 s/S) - -Writing weights file to '-' ... -done - ---- -Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.37375]. -This took 1.05 min. diff --git a/training/dtrain/examples/standard/expected-output.gz b/training/dtrain/examples/standard/expected-output.gz Binary files differnew file mode 100644 index 00000000..43e6b21a --- /dev/null +++ b/training/dtrain/examples/standard/expected-output.gz diff --git a/training/dtrain/examples/standard/nc-wmt11.de.gz b/training/dtrain/examples/standard/nc-wmt11.de.gz Binary files differdeleted file mode 100644 index 0741fd92..00000000 --- a/training/dtrain/examples/standard/nc-wmt11.de.gz +++ /dev/null diff --git a/training/dtrain/examples/standard/nc-wmt11.en.gz b/training/dtrain/examples/standard/nc-wmt11.en.gz Binary files differdeleted file mode 100644 index 1c0bd401..00000000 --- a/training/dtrain/examples/standard/nc-wmt11.en.gz +++ /dev/null diff --git a/training/dtrain/examples/toy/dtrain.ini b/training/dtrain/examples/toy/dtrain.ini index ef956df7..378224b8 100644 --- a/training/dtrain/examples/toy/dtrain.ini +++ b/training/dtrain/examples/toy/dtrain.ini @@ -1,13 +1,8 @@ decoder_config=cdec.ini -input=src -refs=tgt -output=- -print_weights=logp shell_rule house_rule small_rule little_rule PassThrough PassThrough_1 PassThrough_2 PassThrough_3 PassThrough_4 PassThrough_5 PassThrough_6 +bitext=in +output=weights k=4 N=4 -epochs=2 -scorer=bleu -sample_from=kbest -filter=uniq -pair_sampling=all +iterations=2 learning_rate=1 +print_weights=logp shell_rule house_rule small_rule little_rule PassThrough PassThrough_1 PassThrough_2 PassThrough_3 PassThrough_4 PassThrough_5 PassThrough_6 diff --git a/training/dtrain/examples/toy/expected-output b/training/dtrain/examples/toy/expected-output index 1da2aadd..8c758d00 100644 --- a/training/dtrain/examples/toy/expected-output +++ b/training/dtrain/examples/toy/expected-output @@ -1,77 +1,63 @@ -Warning: hi_lo only works with pair_sampling XYX. 
- cdec cfg 'cdec.ini' -Seeding random number sequence to 1664825829 - dtrain Parameters: k 4 N 4 T 2 - scorer 'bleu' - sample from 'kbest' - filter 'uniq' learning rate 1 - gamma 0 - loss margin 0 - pairs 'all' - pair threshold 0 - select weights 'last' - l1 reg 0 'none' - max pairs 4294967295 - cdec cfg 'cdec.ini' - input 'src' - refs 'tgt' - output '-' -(a dot represents 10 inputs) + error margin 0 + l1 reg 0 + decoder conf 'cdec.ini' + input 'in' + output 'weights' +(a dot per input) Iteration #1 of 2. - 2 + ... 2 WEIGHTS logp = +0 - shell_rule = -1 - house_rule = +2 - small_rule = -2 + shell_rule = +0 + house_rule = +3 + small_rule = +0 little_rule = +3 - PassThrough = -5 + PassThrough = -15 + PassThrough_1 = +0 + PassThrough_2 = +0 + PassThrough_3 = +0 + PassThrough_4 = +0 + PassThrough_5 = +0 + PassThrough_6 = +0 --- - 1best avg score: 0.5 (+0.5) - 1best avg model score: 2.5 (+2.5) - avg # pairs: 4 - avg # rank err: 1.5 - avg # margin viol: 0 - non0 feature count: 6 + 1best avg score: 0.40937 (+0.40937) + 1best avg model score: 3 + avg # pairs: 2.5 + non-0 feature count: 4 avg list sz: 4 avg f count: 2.875 (time 0 min, 0 s/S) Iteration #2 of 2. - 2 + ... 2 WEIGHTS logp = +0 - shell_rule = -1 - house_rule = +2 - small_rule = -2 + shell_rule = +0 + house_rule = +3 + small_rule = +0 little_rule = +3 - PassThrough = -5 + PassThrough = -15 + PassThrough_1 = +0 + PassThrough_2 = +0 + PassThrough_3 = +0 + PassThrough_4 = +0 + PassThrough_5 = +0 + PassThrough_6 = +0 --- - 1best avg score: 1 (+0.5) - 1best avg model score: 5 (+2.5) - avg # pairs: 5 - avg # rank err: 0 - avg # margin viol: 0 - non0 feature count: 6 + 1best avg score: 0.81873 (+0.40937) + 1best avg model score: 6 + avg # pairs: 0 + non-0 feature count: 4 avg list sz: 4 avg f count: 3 (time 0 min, 0 s/S) -Writing weights file to '-' ... -house_rule 2 -little_rule 3 -Glue -4 -PassThrough -5 -small_rule -2 -shell_rule -1 -done - --- -Best iteration: 2 [SCORE 'bleu'=1]. +Best iteration: 2 [GOLD = 0.81873]. This took 0 min. 
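The toy example is reduced to the same new interface: a single bitext file 'in' (added below) replaces the separate 'src' and 'tgt' files, and the learned weights are written to the file 'weights', as configured in the toy dtrain.ini. Reproducing the expected output above should amount to something like the following (a sketch; the relative path to the dtrain binary is an assumption):

    cd training/dtrain/examples/toy
    ../../dtrain -c dtrain.ini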
diff --git a/training/dtrain/examples/toy/in b/training/dtrain/examples/toy/in new file mode 100644 index 00000000..5d70795d --- /dev/null +++ b/training/dtrain/examples/toy/in @@ -0,0 +1,2 @@ +ich sah ein kleines haus ||| i saw a little house +ich fand ein kleines haus ||| i found a little house diff --git a/training/dtrain/examples/toy/src b/training/dtrain/examples/toy/src deleted file mode 100644 index 87e39ef2..00000000 --- a/training/dtrain/examples/toy/src +++ /dev/null @@ -1,2 +0,0 @@ -ich sah ein kleines haus -ich fand ein kleines haus diff --git a/training/dtrain/examples/toy/tgt b/training/dtrain/examples/toy/tgt deleted file mode 100644 index 174926b3..00000000 --- a/training/dtrain/examples/toy/tgt +++ /dev/null @@ -1,2 +0,0 @@ -i saw a little house -i found a little house diff --git a/training/dtrain/examples/toy/weights b/training/dtrain/examples/toy/weights new file mode 100644 index 00000000..f6f32772 --- /dev/null +++ b/training/dtrain/examples/toy/weights @@ -0,0 +1,4 @@ +house_rule 3 +little_rule 3 +Glue -12 +PassThrough -15 diff --git a/training/dtrain/kbestget.h b/training/dtrain/kbestget.h deleted file mode 100644 index 85252db3..00000000 --- a/training/dtrain/kbestget.h +++ /dev/null @@ -1,88 +0,0 @@ -#ifndef _DTRAIN_KBESTGET_H_ -#define _DTRAIN_KBESTGET_H_ - -#include "kbest.h" - -namespace dtrain -{ - - -struct KBestGetter : public HypSampler -{ - const unsigned k_; - const string filter_type_; - vector<ScoredHyp> s_; - unsigned src_len_; - - KBestGetter(const unsigned k, const string filter_type) : - k_(k), filter_type_(filter_type) {} - - virtual void - NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) - { - src_len_ = smeta.GetSourceLength(); - KBestScored(*hg); - } - - vector<ScoredHyp>* GetSamples() { return &s_; } - - void - KBestScored(const Hypergraph& forest) - { - if (filter_type_ == "uniq") { - KBestUnique(forest); - } else if (filter_type_ == "not") { - KBestNoFilter(forest); - } - } - - void - KBestUnique(const Hypergraph& forest) - { - s_.clear(); sz_ = f_count_ = 0; - KBest::KBestDerivations<vector<WordID>, ESentenceTraversal, - KBest::FilterUnique, prob_t, EdgeProb> kbest(forest, k_); - for (unsigned i = 0; i < k_; ++i) { - const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal, KBest::FilterUnique, - prob_t, EdgeProb>::Derivation* d = - kbest.LazyKthBest(forest.nodes_.size() - 1, i); - if (!d) break; - ScoredHyp h; - h.w = d->yield; - h.f = d->feature_values; - h.model = log(d->score); - h.rank = i; - h.score = scorer_->Score(h.w, *ref_, i, src_len_); - s_.push_back(h); - sz_++; - f_count_ += h.f.size(); - } - } - - void - KBestNoFilter(const Hypergraph& forest) - { - s_.clear(); sz_ = f_count_ = 0; - KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> kbest(forest, k_); - for (unsigned i = 0; i < k_; ++i) { - const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal>::Derivation* d = - kbest.LazyKthBest(forest.nodes_.size() - 1, i); - if (!d) break; - ScoredHyp h; - h.w = d->yield; - h.f = d->feature_values; - h.model = log(d->score); - h.rank = i; - h.score = scorer_->Score(h.w, *ref_, i, src_len_); - s_.push_back(h); - sz_++; - f_count_ += h.f.size(); - } - } -}; - - -} // namespace - -#endif - diff --git a/training/dtrain/ksampler.h b/training/dtrain/ksampler.h deleted file mode 100644 index 29dab667..00000000 --- a/training/dtrain/ksampler.h +++ /dev/null @@ -1,60 +0,0 @@ -#ifndef _DTRAIN_KSAMPLER_H_ -#define _DTRAIN_KSAMPLER_H_ - -#include "hg_sampler.h" - -namespace dtrain -{ - - -bool 
-cmp_hyp_by_model_d(ScoredHyp a, ScoredHyp b) -{ - return a.model > b.model; -} - -struct KSampler : public HypSampler -{ - const unsigned k_; - vector<ScoredHyp> s_; - MT19937* prng_; - score_t (*scorer)(NgramCounts&, const unsigned, const unsigned, unsigned, vector<score_t>); - unsigned src_len_; - - explicit KSampler(const unsigned k, MT19937* prng) : - k_(k), prng_(prng) {} - - virtual void - NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) - { - src_len_ = smeta.GetSourceLength(); - ScoredSamples(*hg); - } - - vector<ScoredHyp>* GetSamples() { return &s_; } - - void ScoredSamples(const Hypergraph& forest) { - s_.clear(); sz_ = f_count_ = 0; - std::vector<HypergraphSampler::Hypothesis> samples; - HypergraphSampler::sample_hypotheses(forest, k_, prng_, &samples); - for (unsigned i = 0; i < k_; ++i) { - ScoredHyp h; - h.w = samples[i].words; - h.f = samples[i].fmap; - h.model = log(samples[i].model_score); - h.rank = i; - h.score = scorer_->Score(h.w, *ref_, i, src_len_); - s_.push_back(h); - sz_++; - f_count_ += h.f.size(); - } - sort(s_.begin(), s_.end(), cmp_hyp_by_model_d); - for (unsigned i = 0; i < s_.size(); i++) s_[i].rank = i; - } -}; - - -} // namespace - -#endif - diff --git a/training/dtrain/lplp.rb b/training/dtrain/lplp.rb index 86e835e8..a1fcd1a3 100755 --- a/training/dtrain/lplp.rb +++ b/training/dtrain/lplp.rb @@ -19,7 +19,8 @@ end # stats def median(feature_column, n) - return feature_column.concat(0.step(n-feature_column.size-1).map{|i|0}).sort[feature_column.size/2] + return feature_column.concat(0.step(n-feature_column.size-1).map{|i|0})\ + .sort[feature_column.size/2] end def mean(feature_column, n) @@ -28,7 +29,7 @@ end # selection def select_k(weights, norm_fun, n, k=10000) - weights.sort{|a,b| norm_fun.call(b[1], n) <=> norm_fun.call(a[1], n)}.each { |p| + weights.sort{|a,b| norm_fun.call(b[1], n)<=>norm_fun.call(a[1], n)}.each { |p| puts "#{p[0]}\t#{mean(p[1], n)}" k -= 1 if k == 0 then break end diff --git a/training/dtrain/pairsampling.h b/training/dtrain/pairsampling.h deleted file mode 100644 index 1a3c498c..00000000 --- a/training/dtrain/pairsampling.h +++ /dev/null @@ -1,141 +0,0 @@ -#ifndef _DTRAIN_PAIRSAMPLING_H_ -#define _DTRAIN_PAIRSAMPLING_H_ - -namespace dtrain -{ - - -bool -accept_pair(score_t a, score_t b, score_t threshold) -{ - if (fabs(a - b) < threshold) return false; - return true; -} - -bool -cmp_hyp_by_score_d(ScoredHyp a, ScoredHyp b) -{ - return a.score > b.score; -} - -inline void -all_pairs(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, bool misranked_only, float _unused=1) -{ - sort(s->begin(), s->end(), cmp_hyp_by_score_d); - unsigned sz = s->size(); - bool b = false; - unsigned count = 0; - for (unsigned i = 0; i < sz-1; i++) { - for (unsigned j = i+1; j < sz; j++) { - if (misranked_only && !((*s)[i].model <= (*s)[j].model)) continue; - if (threshold > 0) { - if (accept_pair((*s)[i].score, (*s)[j].score, threshold)) - training.push_back(make_pair((*s)[i], (*s)[j])); - } else { - if ((*s)[i].score != (*s)[j].score) - training.push_back(make_pair((*s)[i], (*s)[j])); - } - if (++count == max) { - b = true; - break; - } - } - if (b) break; - } -} - -/* - * multipartite ranking - * sort (descending) by bleu - * compare top X to middle Y and low X - * cmp middle Y to low X - */ - -inline void -partXYX(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, bool misranked_only, float hi_lo) -{ - unsigned sz = s->size(); - 
if (sz < 2) return; - sort(s->begin(), s->end(), cmp_hyp_by_score_d); - unsigned sep = round(sz*hi_lo); - unsigned sep_hi = sep; - if (sz > 4) while (sep_hi < sz && (*s)[sep_hi-1].score == (*s)[sep_hi].score) ++sep_hi; - else sep_hi = 1; - bool b = false; - unsigned count = 0; - for (unsigned i = 0; i < sep_hi; i++) { - for (unsigned j = sep_hi; j < sz; j++) { - if (misranked_only && !((*s)[i].model <= (*s)[j].model)) continue; - if (threshold > 0) { - if (accept_pair((*s)[i].score, (*s)[j].score, threshold)) - training.push_back(make_pair((*s)[i], (*s)[j])); - } else { - if ((*s)[i].score != (*s)[j].score) - training.push_back(make_pair((*s)[i], (*s)[j])); - } - if (++count == max) { - b = true; - break; - } - } - if (b) break; - } - unsigned sep_lo = sz-sep; - while (sep_lo > 0 && (*s)[sep_lo-1].score == (*s)[sep_lo].score) --sep_lo; - for (unsigned i = sep_hi; i < sz-sep_lo; i++) { - for (unsigned j = sz-sep_lo; j < sz; j++) { - if (misranked_only && !((*s)[i].model <= (*s)[j].model)) continue; - if (threshold > 0) { - if (accept_pair((*s)[i].score, (*s)[j].score, threshold)) - training.push_back(make_pair((*s)[i], (*s)[j])); - } else { - if ((*s)[i].score != (*s)[j].score) - training.push_back(make_pair((*s)[i], (*s)[j])); - } - if (++count == max) return; - } - } -} - -/* - * pair sampling as in - * 'Tuning as Ranking' (Hopkins & May, 2011) - * count = 5000 - * threshold = 5% BLEU (0.05 for param 3) - * cut = top 50 - */ -bool -_PRO_cmp_pair_by_diff_d(pair<ScoredHyp,ScoredHyp> a, pair<ScoredHyp,ScoredHyp> b) -{ - return (fabs(a.first.score - a.second.score)) > (fabs(b.first.score - b.second.score)); -} -inline void -PROsampling(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, bool _unused=false, float _also_unused=0) -{ - sort(s->begin(), s->end(), cmp_hyp_by_score_d); - unsigned max_count = 5000, count = 0, sz = s->size(); - bool b = false; - for (unsigned i = 0; i < sz-1; i++) { - for (unsigned j = i+1; j < sz; j++) { - if (accept_pair((*s)[i].score, (*s)[j].score, threshold)) { - training.push_back(make_pair((*s)[i], (*s)[j])); - if (++count == max_count) { - b = true; - break; - } - } - } - if (b) break; - } - if (training.size() > 50) { - sort(training.begin(), training.end(), _PRO_cmp_pair_by_diff_d); - training.erase(training.begin()+50, training.end()); - } - return; -} - - -} // namespace - -#endif - diff --git a/training/dtrain/parallelize.rb b/training/dtrain/parallelize.rb index 82600009..29f3e609 100755 --- a/training/dtrain/parallelize.rb +++ b/training/dtrain/parallelize.rb @@ -1,65 +1,54 @@ #!/usr/bin/env ruby require 'trollop' +require 'zipf' -def usage - STDERR.write "Usage: " - STDERR.write "ruby parallelize.rb -c <dtrain.ini> [-e <epochs=10>] [--randomize/-z] [--reshard/-y] -s <#shards|0> [-p <at once=9999>] -i <input> -r <refs> [--qsub/-q] [--dtrain_binary <path to dtrain binary>] [-l \"l2 select_k 100000\"] [--extra_qsub \"-l virtual_free=24G\"]\n" - exit 1 +conf = Trollop::options do + opt :config, "dtrain configuration", :type => :string + opt :input, "input as bitext (f ||| e)", :type => :string + opt :epochs, "number of epochs", :type => :int, :default => 10 + opt :lplp_args, "arguments for lplp.rb", :type => :string, :default => "l2 select_k 100000" + opt :randomize, "randomize shards once", :type => :bool, :default => false, :short => '-z' + opt :reshard, "randomize after each epoch", :type => :bool, :default => false, :short => '-y' + opt :shards, "number of shards", :type => :int + opt :weights, "input 
weights for first epoch", :type => :string, :default => '' + opt :per_shard_decoder_configs, "give custom decoder config per shard", :type => :string, :short => '-o' + opt :processes_at_once, "jobs to run at oce", :type => :int, :default => 9999 + opt :qsub, "use qsub", :type => :bool, :default => false + opt :qsub_args, "extra args for qsub", :type => :string, :default => "-l h_vmem=5G" + opt :dtrain_binary, "path to dtrain binary", :type => :string end -opts = Trollop::options do - opt :config, "dtrain config file", :type => :string - opt :epochs, "number of epochs", :type => :int, :default => 10 - opt :lplp_args, "arguments for lplp.rb", :type => :string, :default => "l2 select_k 100000" - opt :randomize, "randomize shards before each epoch", :type => :bool, :short => '-z', :default => false - opt :reshard, "reshard after each epoch", :type => :bool, :short => '-y', :default => false - opt :shards, "number of shards", :type => :int - opt :processes_at_once, "have this number (max) running at the same time", :type => :int, :default => 9999 - opt :input, "input", :type => :string - opt :references, "references", :type => :string - opt :qsub, "use qsub", :type => :bool, :default => false - opt :dtrain_binary, "path to dtrain binary", :type => :string - opt :extra_qsub, "extra qsub args", :type => :string, :default => "" - opt :per_shard_decoder_configs, "give special decoder config per shard", :type => :string, :short => '-o' - opt :first_input_weights, "input weights for first iter", :type => :string, :default => '', :short => '-w' -end -usage if not opts[:config]&&opts[:shards]&&opts[:input]&&opts[:references] - dtrain_dir = File.expand_path File.dirname(__FILE__) -if not opts[:dtrain_binary] +if not conf[:dtrain_binary] dtrain_bin = "#{dtrain_dir}/dtrain" else - dtrain_bin = opts[:dtrain_binary] + dtrain_bin = conf[:dtrain_binary] end -ruby = '/usr/bin/ruby' lplp_rb = "#{dtrain_dir}/lplp.rb" -lplp_args = opts[:lplp_args] -cat = '/bin/cat' +lplp_args = conf[:lplp_args] -ini = opts[:config] -epochs = opts[:epochs] -rand = opts[:randomize] -reshard = opts[:reshard] -predefined_shards = false +dtrain_conf = conf[:config] +epochs = conf[:epochs] +rand = conf[:randomize] +reshard = conf[:reshard] +predefined_shards = false per_shard_decoder_configs = false -if opts[:shards] == 0 +if conf[:shards] == 0 predefined_shards = true num_shards = 0 - per_shard_decoder_configs = true if opts[:per_shard_decoder_configs] + per_shard_decoder_configs = true if conf[:per_shard_decoder_configs] else - num_shards = opts[:shards] + num_shards = conf[:shards] end -input = opts[:input] -refs = opts[:references] -use_qsub = opts[:qsub] -shards_at_once = opts[:processes_at_once] -first_input_weights = opts[:first_input_weights] -opts[:extra_qsub] = "-l #{opts[:extra_qsub]}" if opts[:extra_qsub]!="" +input = conf[:input] +use_qsub = conf[:qsub] +shards_at_once = conf[:processes_at_once] +first_input_weights = conf[:weights] `mkdir work` -def make_shards(input, refs, num_shards, epoch, rand) +def make_shards input, num_shards, epoch, rand lc = `wc -l #{input}`.split.first.to_i index = (0..lc-1).to_a index.reverse! 
@@ -69,12 +58,8 @@ def make_shards(input, refs, num_shards, epoch, rand) leftover = 0 if leftover < 0 in_f = File.new input, 'r' in_lines = in_f.readlines - refs_f = File.new refs, 'r' - refs_lines = refs_f.readlines shard_in_files = [] - shard_refs_files = [] in_fns = [] - refs_fns = [] new_num_shards = 0 0.upto(num_shards-1) { |shard| break if index.size==0 @@ -82,40 +67,33 @@ def make_shards(input, refs, num_shards, epoch, rand) in_fn = "work/shard.#{shard}.#{epoch}.in" shard_in = File.new in_fn, 'w+' in_fns << in_fn - refs_fn = "work/shard.#{shard}.#{epoch}.refs" - shard_refs = File.new refs_fn, 'w+' - refs_fns << refs_fn 0.upto(shard_sz-1) { |i| j = index.pop + break if !j shard_in.write in_lines[j] - shard_refs.write refs_lines[j] } shard_in_files << shard_in - shard_refs_files << shard_refs } while leftover > 0 j = index.pop shard_in_files[-1].write in_lines[j] - shard_refs_files[-1].write refs_lines[j] leftover -= 1 end - (shard_in_files + shard_refs_files).each do |f| f.close end + shard_in_files.each do |f| f.close end in_f.close - refs_f.close - return in_fns, refs_fns, new_num_shards + return in_fns, new_num_shards end input_files = [] -refs_files = [] if predefined_shards input_files = File.new(input).readlines.map {|i| i.strip } - refs_files = File.new(refs).readlines.map {|i| i.strip } if per_shard_decoder_configs - decoder_configs = File.new(opts[:per_shard_decoder_configs]).readlines.map {|i| i.strip} + decoder_configs = ReadFile.readlines_strip(conf[:per_shard_decoder_configs] + ).map { |i| i.strip } end num_shards = input_files.size else - input_files, refs_files, num_shards = make_shards input, refs, num_shards, 0, rand + input_files, num_shards = make_shards input, num_shards, 0, rand end 0.upto(epochs-1) { |epoch| @@ -132,24 +110,30 @@ end qsub_str_start = qsub_str_end = '' local_end = '' if use_qsub - qsub_str_start = "qsub #{opts[:extra_qsub]} -cwd -sync y -b y -j y -o work/out.#{shard}.#{epoch} -N dtrain.#{shard}.#{epoch} \"" + qsub_str_start = "qsub #{conf[:qsub_args]} -cwd -sync y -b y -j y\ + -o work/out.#{shard}.#{epoch}\ + -N dtrain.#{shard}.#{epoch} \"" qsub_str_end = "\"" local_end = '' else local_end = "2>work/out.#{shard}.#{epoch}" end if per_shard_decoder_configs - cdec_cfg = "--decoder_config #{decoder_configs[shard]}" + cdec_conf = "--decoder_config #{decoder_configs[shard]}" else - cdec_cfg = "" + cdec_conf = "" end if first_input_weights!='' && epoch == 0 input_weights = "--input_weights #{first_input_weights}" end pids << Kernel.fork { - `#{qsub_str_start}#{dtrain_bin} -c #{ini} #{cdec_cfg} #{input_weights}\ - --input #{input_files[shard]}\ - --refs #{refs_files[shard]}\ + puts "#{qsub_str_start}#{dtrain_bin} -c #{dtrain_conf} #{cdec_conf}\ + #{input_weights}\ + --bitext #{input_files[shard]}\ + --output work/weights.#{shard}.#{epoch}#{qsub_str_end} #{local_end}" + `#{qsub_str_start}#{dtrain_bin} -c #{dtrain_conf} #{cdec_conf}\ + #{input_weights}\ + --bitext #{input_files[shard]}\ --output work/weights.#{shard}.#{epoch}#{qsub_str_end} #{local_end}` } weights_files << "work/weights.#{shard}.#{epoch}" @@ -159,10 +143,11 @@ end pids.each { |pid| Process.wait(pid) } pids.clear end - `#{cat} work/weights.*.#{epoch} > work/weights_cat` - `#{ruby} #{lplp_rb} #{lplp_args} #{num_shards} < work/weights_cat > work/weights.#{epoch}` + `cat work/weights.*.#{epoch} > work/weights_cat` + `ruby #{lplp_rb} #{lplp_args} #{num_shards} < work/weights_cat\ + > work/weights.#{epoch}` if rand and reshard and epoch+1!=epochs - input_files, refs_files, num_shards = 
make_shards input, refs, num_shards, epoch+1, rand + input_files, num_shards = make_shards input, num_shards, epoch+1, rand end } diff --git a/training/dtrain/sample.h b/training/dtrain/sample.h new file mode 100644 index 00000000..c3586c58 --- /dev/null +++ b/training/dtrain/sample.h @@ -0,0 +1,62 @@ +#ifndef _DTRAIN_SAMPLE_H_ +#define _DTRAIN_SAMPLE_H_ + +#include "kbest.h" + +namespace dtrain +{ + + +struct ScoredKbest : public DecoderObserver +{ + const size_t k_; + vector<ScoredHyp> s_; + size_t src_len_; + PerSentenceBleuScorer* scorer_; + vector<vector<WordID> >* refs_; + vector<Ngrams>* ref_ngs_; + vector<size_t>* ref_ls_; + size_t f_count_, sz_; + + ScoredKbest(const size_t k, PerSentenceBleuScorer* scorer) : + k_(k), scorer_(scorer) {} + + virtual void + NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) + { + src_len_ = smeta.GetSourceLength(); + s_.clear(); sz_ = f_count_ = 0; + KBest::KBestDerivations<vector<WordID>, ESentenceTraversal, + KBest::FilterUnique, prob_t, EdgeProb> kbest(*hg, k_); + for (size_t i = 0; i < k_; ++i) { + const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal, KBest::FilterUnique, + prob_t, EdgeProb>::Derivation* d = + kbest.LazyKthBest(hg->nodes_.size() - 1, i); + if (!d) break; + ScoredHyp h; + h.w = d->yield; + h.f = d->feature_values; + h.model = log(d->score); + h.rank = i; + h.gold = scorer_->Score(h.w, *ref_ngs_, *ref_ls_); + s_.push_back(h); + sz_++; + f_count_ += h.f.size(); + } + } + + vector<ScoredHyp>* GetSamples() { return &s_; } + inline void SetReference(vector<Ngrams>& ngs, vector<size_t>& ls) + { + ref_ngs_ = &ngs; + ref_ls_ = &ls; + } + inline size_t GetFeatureCount() { return f_count_; } + inline size_t GetSize() { return sz_; } +}; + + +} // namespace + +#endif + diff --git a/training/dtrain/score.cc b/training/dtrain/score.cc deleted file mode 100644 index 127f34d2..00000000 --- a/training/dtrain/score.cc +++ /dev/null @@ -1,283 +0,0 @@ -#include "score.h" - -namespace dtrain -{ - - -/* - * bleu - * - * as in "BLEU: a Method for Automatic Evaluation - * of Machine Translation" - * (Papineni et al. 
'02) - * - * NOTE: 0 if for one n \in {1..N} count is 0 - */ -score_t -BleuScorer::Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len) -{ - if (hyp_len == 0 || ref_len == 0) return 0.; - unsigned M = N_; - vector<score_t> v = w_; - if (ref_len < N_) { - M = ref_len; - for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M); - } - score_t sum = 0; - for (unsigned i = 0; i < M; i++) { - if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) return 0.; - sum += v[i] * log((score_t)counts.clipped_[i]/counts.sum_[i]); - } - return brevity_penalty(hyp_len, ref_len) * exp(sum); -} - -score_t -BleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - return Bleu(counts, hyp_len, ref_len); -} - -/* - * 'stupid' bleu - * - * as in "ORANGE: a Method for Evaluating - * Automatic Evaluation Metrics - * for Machine Translation" - * (Lin & Och '04) - * - * NOTE: 0 iff no 1gram match ('grounded') - */ -score_t -StupidBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - unsigned M = N_; - vector<score_t> v = w_; - if (ref_len < N_) { - M = ref_len; - for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M); - } - score_t sum = 0, add = 0; - for (unsigned i = 0; i < M; i++) { - if (i == 0 && (counts.sum_[i] == 0 || counts.clipped_[i] == 0)) return 0.; - if (i == 1) add = 1; - sum += v[i] * log(((score_t)counts.clipped_[i] + add)/((counts.sum_[i] + add))); - } - return brevity_penalty(hyp_len, ref_len) * exp(sum); -} - -/* - * fixed 'stupid' bleu - * - * as in "Optimizing for Sentence-Level BLEU+1 - * Yields Short Translations" - * (Nakov et al. '12) - */ -score_t -FixedStupidBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - unsigned M = N_; - vector<score_t> v = w_; - if (ref_len < N_) { - M = ref_len; - for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M); - } - score_t sum = 0, add = 0; - for (unsigned i = 0; i < M; i++) { - if (i == 0 && (counts.sum_[i] == 0 || counts.clipped_[i] == 0)) return 0.; - if (i == 1) add = 1; - sum += v[i] * log(((score_t)counts.clipped_[i] + add)/((counts.sum_[i] + add))); - } - return brevity_penalty(hyp_len, ref_len+1) * exp(sum); // <- fix -} - -/* - * smooth bleu - * - * as in "An End-to-End Discriminative Approach - * to Machine Translation" - * (Liang et al. 
'06) - * - * NOTE: max is 0.9375 (with N=4) - */ -score_t -SmoothBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - unsigned M = N_; - if (ref_len < N_) M = ref_len; - score_t sum = 0.; - vector<score_t> i_bleu; - for (unsigned i = 0; i < M; i++) i_bleu.push_back(0.); - for (unsigned i = 0; i < M; i++) { - if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) { - break; - } else { - score_t i_ng = log((score_t)counts.clipped_[i]/counts.sum_[i]); - for (unsigned j = i; j < M; j++) { - i_bleu[j] += (1/((score_t)j+1)) * i_ng; - } - } - sum += exp(i_bleu[i])/pow(2.0, (double)(N_-i)); - } - return brevity_penalty(hyp_len, ref_len) * sum; -} - -/* - * 'sum' bleu - * - * sum up Ngram precisions - */ -score_t -SumBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - unsigned M = N_; - if (ref_len < N_) M = ref_len; - score_t sum = 0.; - unsigned j = 1; - for (unsigned i = 0; i < M; i++) { - if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break; - sum += ((score_t)counts.clipped_[i]/counts.sum_[i])/pow(2.0, (double) (N_-j+1)); - j++; - } - return brevity_penalty(hyp_len, ref_len) * sum; -} - -/* - * 'sum' (exp) bleu - * - * sum up exp(Ngram precisions) - */ -score_t -SumExpBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - unsigned M = N_; - if (ref_len < N_) M = ref_len; - score_t sum = 0.; - unsigned j = 1; - for (unsigned i = 0; i < M; i++) { - if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break; - sum += exp(((score_t)counts.clipped_[i]/counts.sum_[i]))/pow(2.0, (double) (N_-j+1)); - j++; - } - return brevity_penalty(hyp_len, ref_len) * sum; -} - -/* - * 'sum' (whatever) bleu - * - * sum up exp(weight * log(Ngram precisions)) - */ -score_t -SumWhateverBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - unsigned M = N_; - vector<score_t> v = w_; - if (ref_len < N_) { - M = ref_len; - for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M); - } - score_t sum = 0.; - unsigned j = 1; - for (unsigned i = 0; i < M; i++) { - if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break; - sum += exp(v[i] * log(((score_t)counts.clipped_[i]/counts.sum_[i])))/pow(2.0, (double) (N_-j+1)); - j++; - } - return brevity_penalty(hyp_len, ref_len) * sum; -} - -/* - * approx. bleu - * - * as in "Online Large-Margin Training of Syntactic - * and Structural Translation Features" - * (Chiang et al. '08) - * - * NOTE: Needs some more code in dtrain.cc . - * No scaling by src len. 
- */ -score_t -ApproxBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref, - const unsigned rank, const unsigned src_len) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (ref_len == 0) return 0.; - score_t score = 0.; - NgramCounts counts(N_); - if (hyp_len > 0) { - counts = make_ngram_counts(hyp, ref, N_); - NgramCounts tmp = glob_onebest_counts_ + counts; - score = Bleu(tmp, hyp_len, ref_len); - } - if (rank == 0) { // 'context of 1best translations' - glob_onebest_counts_ += counts; - glob_onebest_counts_ *= discount_; - glob_hyp_len_ = discount_ * (glob_hyp_len_ + hyp_len); - glob_ref_len_ = discount_ * (glob_ref_len_ + ref_len); - glob_src_len_ = discount_ * (glob_src_len_ + src_len); - } - return score; -} - -/* - * Linear (Corpus) Bleu - * - * as in "Lattice Minimum Bayes-Risk Decoding - * for Statistical Machine Translation" - * (Tromble et al. '08) - * - */ -score_t -LinearBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref, - const unsigned rank, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (ref_len == 0) return 0.; - unsigned M = N_; - if (ref_len < N_) M = ref_len; - NgramCounts counts(M); - if (hyp_len > 0) - counts = make_ngram_counts(hyp, ref, M); - score_t ret = 0.; - for (unsigned i = 0; i < M; i++) { - if (counts.sum_[i] == 0 || onebest_counts_.sum_[i] == 0) break; - ret += counts.sum_[i]/onebest_counts_.sum_[i]; - } - ret = -(hyp_len/(score_t)onebest_len_) + (1./M) * ret; - if (rank == 0) { - onebest_len_ += hyp_len; - onebest_counts_ += counts; - } - return ret; -} - - -} // namespace - diff --git a/training/dtrain/score.h b/training/dtrain/score.h index 1cdd3fa9..d51aef82 100644 --- a/training/dtrain/score.h +++ b/training/dtrain/score.h @@ -6,20 +6,19 @@ namespace dtrain { - struct NgramCounts { - unsigned N_; - map<unsigned, score_t> clipped_; - map<unsigned, score_t> sum_; + size_t N_; + map<size_t, weight_t> clipped_; + map<size_t, weight_t> sum_; - NgramCounts(const unsigned N) : N_(N) { Zero(); } + NgramCounts(const size_t N) : N_(N) { Zero(); } inline void operator+=(const NgramCounts& rhs) { if (rhs.N_ > N_) Resize(rhs.N_); - for (unsigned i = 0; i < N_; i++) { + for (size_t i = 0; i < N_; i++) { this->clipped_[i] += rhs.clipped_.find(i)->second; this->sum_[i] += rhs.sum_.find(i)->second; } @@ -30,20 +29,21 @@ struct NgramCounts { NgramCounts result = *this; result += other; + return result; } inline void - operator*=(const score_t rhs) + operator*=(const weight_t rhs) { - for (unsigned i = 0; i < N_; i++) { + for (size_t i = 0; i < N_; i++) { this->clipped_[i] *= rhs; this->sum_[i] *= rhs; } } inline void - Add(const unsigned count, const unsigned ref_count, const unsigned i) + Add(const size_t count, const size_t ref_count, const size_t i) { assert(i < N_); if (count > ref_count) { @@ -57,40 +57,31 @@ struct NgramCounts inline void Zero() { - for (unsigned i = 0; i < N_; i++) { + for (size_t i = 0; i < N_; i++) { clipped_[i] = 0.; sum_[i] = 0.; } } inline void - One() - { - for (unsigned i = 0; i < N_; i++) { - clipped_[i] = 1.; - sum_[i] = 1.; - } - } - - inline void - Print() + Print(ostream& os=cerr) { - for (unsigned i = 0; i < N_; i++) { - cout << i+1 << "grams (clipped):\t" << clipped_[i] << endl; - cout << i+1 << "grams:\t\t\t" << sum_[i] << endl; + for (size_t i = 0; i < N_; i++) { + os << i+1 << "grams (clipped):\t" << clipped_[i] << endl; + os << i+1 << "grams:\t\t\t" << sum_[i] << endl; } } - inline void Resize(unsigned N) + inline void Resize(size_t 
N) { if (N == N_) return; else if (N > N_) { - for (unsigned i = N_; i < N; i++) { + for (size_t i = N_; i < N; i++) { clipped_[i] = 0.; sum_[i] = 0.; } } else { // N < N_ - for (unsigned i = N_-1; i > N-1; i--) { + for (size_t i = N_-1; i > N-1; i--) { clipped_.erase(i); sum_.erase(i); } @@ -99,123 +90,116 @@ struct NgramCounts } }; -typedef map<vector<WordID>, unsigned> Ngrams; +typedef map<vector<WordID>, size_t> Ngrams; inline Ngrams -make_ngrams(const vector<WordID>& s, const unsigned N) +MakeNgrams(const vector<WordID>& s, const size_t N) { Ngrams ngrams; vector<WordID> ng; for (size_t i = 0; i < s.size(); i++) { ng.clear(); - for (unsigned j = i; j < min(i+N, s.size()); j++) { + for (size_t j = i; j < min(i+N, s.size()); j++) { ng.push_back(s[j]); ngrams[ng]++; } } + return ngrams; } inline NgramCounts -make_ngram_counts(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned N) +MakeNgramCounts(const vector<WordID>& hyp, + const vector<Ngrams>& ref, + const size_t N) { - Ngrams hyp_ngrams = make_ngrams(hyp, N); - Ngrams ref_ngrams = make_ngrams(ref, N); + Ngrams hyp_ngrams = MakeNgrams(hyp, N); NgramCounts counts(N); - Ngrams::iterator it; - Ngrams::iterator ti; + Ngrams::iterator it, ti; for (it = hyp_ngrams.begin(); it != hyp_ngrams.end(); it++) { - ti = ref_ngrams.find(it->first); - if (ti != ref_ngrams.end()) { - counts.Add(it->second, ti->second, it->first.size() - 1); - } else { - counts.Add(it->second, 0, it->first.size() - 1); + size_t max_ref_count = 0; + for (auto r: ref) { + ti = r.find(it->first); + if (ti != r.end()) + max_ref_count = max(max_ref_count, ti->second); } + counts.Add(it->second, min(it->second, max_ref_count), it->first.size()-1); } + return counts; } -struct BleuScorer : public LocalScorer +/* + * per-sentence BLEU + * as in "Optimizing for Sentence-Level BLEU+1 + * Yields Short Translations" + * (Nakov et al. 
'12) + * + * [simply add 1 to reference length for calculation of BP] + * + */ +struct PerSentenceBleuScorer { - score_t Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len); - score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); - void Reset() {} -}; - -struct StupidBleuScorer : public LocalScorer -{ - score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); - void Reset() {} -}; - -struct FixedStupidBleuScorer : public LocalScorer -{ - score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); - void Reset() {} -}; - -struct SmoothBleuScorer : public LocalScorer -{ - score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); - void Reset() {} -}; - -struct SumBleuScorer : public LocalScorer -{ - score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); - void Reset() {} -}; + const size_t N_; + vector<weight_t> w_; -struct SumExpBleuScorer : public LocalScorer -{ - score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); - void Reset() {} -}; - -struct SumWhateverBleuScorer : public LocalScorer -{ - score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); - void Reset() {}; -}; - -struct ApproxBleuScorer : public BleuScorer -{ - NgramCounts glob_onebest_counts_; - unsigned glob_hyp_len_, glob_ref_len_, glob_src_len_; - score_t discount_; - - ApproxBleuScorer(unsigned N, score_t d) : glob_onebest_counts_(NgramCounts(N)), discount_(d) + PerSentenceBleuScorer(size_t n) : N_(n) { - glob_hyp_len_ = glob_ref_len_ = glob_src_len_ = 0; + for (size_t i = 1; i <= N_; i++) + w_.push_back(1.0/N_); } - inline void Reset() { - glob_onebest_counts_.Zero(); - glob_hyp_len_ = glob_ref_len_ = glob_src_len_ = 0.; - } - - score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned rank, const unsigned src_len); -}; - -struct LinearBleuScorer : public BleuScorer -{ - unsigned onebest_len_; - NgramCounts onebest_counts_; - - LinearBleuScorer(unsigned N) : onebest_len_(1), onebest_counts_(N) + inline weight_t + BrevityPenalty(const size_t hl, const size_t rl) { - onebest_counts_.One(); + if (hl > rl) + return 1; + + return exp(1 - (weight_t)rl/hl); } - score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned rank, const unsigned /*src_len*/); + weight_t + Score(const vector<WordID>& hyp, + const vector<Ngrams>& ref_ngs, + const vector<size_t>& ref_ls) + { + size_t hl = hyp.size(), rl = 0; + if (hl == 0) return 0.; + // best match reference length + if (ref_ls.size() == 1) { + rl = ref_ls.front(); + } else { + size_t i = 0, best_idx = 0; + size_t best = numeric_limits<size_t>::max(); + for (auto l: ref_ls) { + size_t d = abs(hl-l); + if (d < best) { + best_idx = i; + best = d; + } + i += 1; + } + rl = ref_ls[best_idx]; + } + if (rl == 0) return 0.; + NgramCounts counts = MakeNgramCounts(hyp, ref_ngs, N_); + size_t M = N_; + vector<weight_t> v = w_; + if (rl < N_) { + M = rl; + for (size_t i = 0; i < M; i++) v[i] = 1/((weight_t)M); + } + weight_t sum = 0, add = 0; + for (size_t i = 0; i < M; i++) { + if (i == 0 && (counts.sum_[i] == 0 || counts.clipped_[i] == 0)) return 0.; + if (i == 1) add = 1; + 
sum += v[i] * log(((weight_t)counts.clipped_[i] + add)/((counts.sum_[i] + add))); + } - inline void Reset() { - onebest_len_ = 1; - onebest_counts_.One(); + return BrevityPenalty(hl, rl+1) * exp(sum); } }; - } // namespace #endif diff --git a/training/dtrain/update.h b/training/dtrain/update.h new file mode 100644 index 00000000..57671ce1 --- /dev/null +++ b/training/dtrain/update.h @@ -0,0 +1,65 @@ +#ifndef _DTRAIN_UPDATE_H_ +#define _DTRAIN_UPDATE_H_ + +namespace dtrain +{ + +bool +CmpHypsByGold(ScoredHyp a, ScoredHyp b) +{ + return a.gold > b.gold; +} + +/* + * multipartite ranking + * sort (descending) by bleu + * compare top X (hi) to middle Y (med) and low X (lo) + * cmp middle Y to low X + */ +inline size_t +CollectUpdates(vector<ScoredHyp>* s, + SparseVector<weight_t>& updates, + float margin=1.0) +{ + size_t num_pairs = 0; + size_t sz = s->size(); + if (sz < 2) return 0; + sort(s->begin(), s->end(), CmpHypsByGold); + size_t sep = round(sz*0.1); + size_t sep_hi = sep; + if (sz > 4) { + while + (sep_hi < sz && (*s)[sep_hi-1].gold == (*s)[sep_hi].gold) ++sep_hi; + } + else sep_hi = 1; + for (size_t i = 0; i < sep_hi; i++) { + for (size_t j = sep_hi; j < sz; j++) { + if (((*s)[i].model-(*s)[j].model) > margin) + continue; + if ((*s)[i].gold != (*s)[j].gold) { + updates += (*s)[i].f-(*s)[j].f; + num_pairs++; + } + } + } + size_t sep_lo = sz-sep; + while (sep_lo > 0 && (*s)[sep_lo-1].gold == (*s)[sep_lo].gold) + --sep_lo; + for (size_t i = sep_hi; i < sep_lo; i++) { + for (size_t j = sep_lo; j < sz; j++) { + if (((*s)[i].model-(*s)[j].model) > margin) + continue; + if ((*s)[i].gold != (*s)[j].gold) { + updates += (*s)[i].f-(*s)[j].f; + num_pairs++; + } + } + } + + return num_pairs; +} + +} // namespace + +#endif + diff --git a/training/pro/pro.pl b/training/pro/pro.pl index a059477d..8ebb5864 100755 --- a/training/pro/pro.pl +++ b/training/pro/pro.pl @@ -69,18 +69,19 @@ my $reg_previous = 5000; # Process command-line options if (GetOptions( - "config=s" => \$iniFile, - "weights=s" => \$initial_weights, - "devset=s" => \$devset, - "jobs=i" => \$jobs, - "metric=s" => \$metric, - "pass-suffix=s" => \$pass_suffix, - "qsub" => \$useqsub, - "help" => \$help, - "reg=f" => \$reg, - "reg-previous=f" => \$reg_previous, + "config=s" => \$iniFile, + "weights=s" => \$initial_weights, + "devset=s" => \$devset, + "jobs=i" => \$jobs, + "max-iterations=i" => \$max_iterations, + "metric=s" => \$metric, + "pass-suffix=s" => \$pass_suffix, + "qsub" => \$useqsub, + "help" => \$help, + "reg=f" => \$reg, + "reg-previous=f" => \$reg_previous, "pmem=s" => \$pmem, - "output-dir=s" => \$dir, + "output-dir=s" => \$dir, ) == 0 || @ARGV!=0 || $help) { print_help(); exit; diff --git a/utils/fast_sparse_vector.h b/utils/fast_sparse_vector.h index 1e0ab428..4e197f73 100644 --- a/utils/fast_sparse_vector.h +++ b/utils/fast_sparse_vector.h @@ -319,6 +319,12 @@ class FastSparseVector { get_or_create_bin(it->first) += it->second * scalar; } } + template <typename O> + inline void cw_mult(FastSparseVector<O>& other) { + for (iterator it = begin(); it != end(); ++it) { + it->second = other[it->first] * it->second; + } + } inline FastSparseVector& operator-=(const FastSparseVector& other) { const typename FastSparseVector::const_iterator end = other.end(); for (typename FastSparseVector::const_iterator it = other.begin(); it != end; ++it) { diff --git a/word-aligner/fast_align.cc b/word-aligner/fast_align.cc index 73b72399..a3b346ec 100644 --- a/word-aligner/fast_align.cc +++ b/word-aligner/fast_align.cc @@ -43,7 +43,8 @@ 
bool InitCommandLine(int argc, char** argv, po::variables_map* conf) { ("testset,x", po::value<string>(), "After training completes, compute the log likelihood of this set of sentence pairs under the learned model") ("no_add_viterbi,V","When writing model parameters, do not add Viterbi alignment points (may generate a grammar where some training sentence pairs are unreachable)") ("force_align,f",po::value<string>(), "Load previously written parameters to 'force align' input. Set --diagonal_tension and --mean_srclen_multiplier as estimated during training.") - ("mean_srclen_multiplier,m",po::value<double>()->default_value(1), "When --force_align, use this source length multiplier"); + ("mean_srclen_multiplier,m",po::value<double>()->default_value(1), "When --force_align, use this source length multiplier") + ("init_ttable,J",po::value<string>(), "Initialize ttable with this file (output of -p). Also give --diagonal_tension."); po::options_description clo("Command line options"); clo.add_options() ("config", po::value<string>(), "Configuration file") @@ -109,6 +110,11 @@ int main(int argc, char** argv) { s2t.DeserializeLogProbsFromText(s2t_f.stream()); mean_srclen_multiplier = conf["mean_srclen_multiplier"].as<double>(); } + + if (conf.count("init_ttable")) { ReadFile s2t_f(conf["init_ttable"].as<string>()); s2t.DeserializeLogProbsFromText(s2t_f.stream()); } for (int iter = 0; iter < ITERATIONS; ++iter) { const bool final_iteration = (iter == (ITERATIONS - 1));
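[Editor's note, not part of the patch: the new PerSentenceBleuScorer in score.h implements the BLEU+1 smoothing of Nakov et al. '12 — add 1 to the clipped and total counts of every n-gram order above unigrams, and add 1 to the reference length inside the brevity penalty. The standalone sketch below reproduces only that arithmetic on plain arrays; it is an illustration of the formula, not code from this commit, the count values in main() are invented, and the ref_len < N special case of the real scorer is omitted:]

#include <cmath>
#include <cstdio>

// BLEU+1 over N n-gram orders from precomputed clipped/total counts.
// Unigrams stay unsmoothed, so the score is 0 without any 1-gram match.
double BleuPlusOne(const double* clipped, const double* total,
                   int N, int hyp_len, int ref_len) {
  if (hyp_len == 0 || ref_len == 0) return 0.0;
  if (clipped[0] == 0 || total[0] == 0) return 0.0;
  double sum = 0.0;
  for (int i = 0; i < N; ++i) {
    double add = (i > 0) ? 1.0 : 0.0;           // the "+1" smoothing
    sum += (1.0 / N) * log((clipped[i] + add) / (total[i] + add));
  }
  double rl = ref_len + 1;                       // "+1" on the reference length
  double bp = (hyp_len > rl) ? 1.0 : exp(1.0 - rl / hyp_len);
  return bp * exp(sum);
}

int main() {
  // Invented counts for a 5-word hypothesis against a 6-word reference.
  double clipped[] = {4, 2, 1, 0};
  double total[]   = {5, 4, 3, 2};
  printf("BLEU+1 = %.4f\n", BleuPlusOne(clipped, total, 4, 5, 6));
  return 0;
}

[End of editor's note.]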