From 8aa29810bb77611cc20b7a384897ff6703783ea1 Mon Sep 17 00:00:00 2001
From: Chris Dyer
Date: Sun, 18 Nov 2012 13:35:42 -0500
Subject: major restructure of the training code

---
 dtrain/Makefile.am                        |   7 -
 dtrain/README.md                          |  48 ---
 dtrain/dtrain.cc                          | 657 ------
 dtrain/dtrain.h                           |  97 -----
 dtrain/hstreaming/avg.rb                  |  32 --
 dtrain/hstreaming/cdec.ini                |  22 -
 dtrain/hstreaming/dtrain.ini              |  15 -
 dtrain/hstreaming/dtrain.sh               |   9 -
 dtrain/hstreaming/hadoop-streaming-job.sh |  30 --
 dtrain/hstreaming/lplp.rb                 | 131 ------
 dtrain/hstreaming/red-test                |   9 -
 dtrain/kbestget.h                         | 152 -------
 dtrain/ksampler.h                         |  61 ---
 dtrain/pairsampling.h                     | 149 -------
 dtrain/parallelize.rb                     |  79 ----
 dtrain/parallelize/test/cdec.ini          |  22 -
 dtrain/parallelize/test/dtrain.ini        |  15 -
 dtrain/parallelize/test/in                |  10 -
 dtrain/parallelize/test/refs              |  10 -
 dtrain/score.cc                           | 254 ------------
 dtrain/score.h                            | 212 ----------
 dtrain/test/example/README                |   8 -
 dtrain/test/example/cdec.ini              |  25 --
 dtrain/test/example/dtrain.ini            |  22 -
 dtrain/test/example/expected-output       |  89 ----
 dtrain/test/parallelize/cdec.ini          |  22 -
 dtrain/test/parallelize/dtrain.ini        |  15 -
 dtrain/test/parallelize/in                |  10 -
 dtrain/test/parallelize/refs              |  10 -
 dtrain/test/toy/cdec.ini                  |   2 -
 dtrain/test/toy/dtrain.ini                |  12 -
 dtrain/test/toy/input                     |   2 -
 32 files changed, 2238 deletions(-)
 delete mode 100644 dtrain/Makefile.am
 delete mode 100644 dtrain/README.md
 delete mode 100644 dtrain/dtrain.cc
 delete mode 100644 dtrain/dtrain.h
 delete mode 100755 dtrain/hstreaming/avg.rb
 delete mode 100644 dtrain/hstreaming/cdec.ini
 delete mode 100644 dtrain/hstreaming/dtrain.ini
 delete mode 100755 dtrain/hstreaming/dtrain.sh
 delete mode 100755 dtrain/hstreaming/hadoop-streaming-job.sh
 delete mode 100755 dtrain/hstreaming/lplp.rb
 delete mode 100644 dtrain/hstreaming/red-test
 delete mode 100644 dtrain/kbestget.h
 delete mode 100644 dtrain/ksampler.h
 delete mode 100644 dtrain/pairsampling.h
 delete mode 100755 dtrain/parallelize.rb
 delete mode 100644 dtrain/parallelize/test/cdec.ini
 delete mode 100644 dtrain/parallelize/test/dtrain.ini
 delete mode 100644 dtrain/parallelize/test/in
 delete mode 100644 dtrain/parallelize/test/refs
 delete mode 100644 dtrain/score.cc
 delete mode 100644 dtrain/score.h
 delete mode 100644 dtrain/test/example/README
 delete mode 100644 dtrain/test/example/cdec.ini
 delete mode 100644 dtrain/test/example/dtrain.ini
 delete mode 100644 dtrain/test/example/expected-output
 delete mode 100644 dtrain/test/parallelize/cdec.ini
 delete mode 100644 dtrain/test/parallelize/dtrain.ini
 delete mode 100644 dtrain/test/parallelize/in
 delete mode 100644 dtrain/test/parallelize/refs
 delete mode 100644 dtrain/test/toy/cdec.ini
 delete mode 100644 dtrain/test/toy/dtrain.ini
 delete mode 100644 dtrain/test/toy/input

diff --git a/dtrain/Makefile.am b/dtrain/Makefile.am
deleted file mode 100644
index ca9581f5..00000000
--- a/dtrain/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-bin_PROGRAMS = dtrain
-
-dtrain_SOURCES = dtrain.cc score.cc
-dtrain_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/klm/search/libksearch.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
-
-AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval
-
diff --git a/dtrain/README.md b/dtrain/README.md
deleted file mode 100644
index 7edabbf1..00000000
--- a/dtrain/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-This is a simple (and parallelizable) tuning method for 
cdec
-which can train the weights of very many (sparse) features.
-It was used here:
-  "Joint Feature Selection in Distributed Stochastic
-   Learning for Large-Scale Discriminative Training in
-   SMT"
-(Simianer, Riezler, Dyer; ACL 2012)
-
-
-Building
---------
-Builds when building cdec, see ../BUILDING.
-To build only the parts needed for dtrain, do
-```
-  autoreconf -ifv
-  ./configure [--disable-gtest]
-  cd dtrain/; make
-```
-
-Running
--------
-To run this on a dev set locally, keep this line in dtrain.h:
-```
- #define DTRAIN_LOCAL
-```
-otherwise remove that line (or #undef it) and recompile. You need a single
-grammar file or input annotated with per-sentence grammars (psg) as you
-would use with cdec. Additionally you need to give dtrain a file with
-references (--refs) when running locally.
-
-The input for use with hadoop streaming looks like this:
-```
- <sid>\t<src>\t<ref>\t<psg>
-```
-To convert a psg to this format you need to replace all "\n"
-by "\t". Make sure there are no tabs in your data.
-
-For an example of local usage (with the 'distributed' format)
-see test/example/ . This expects dtrain to be built without
-DTRAIN_LOCAL.
-
-Legal
------
-Copyright (c) 2012 by Patrick Simianer
-
-See the file ../LICENSE.txt for the licensing terms that this software is
-released under.
-
diff --git a/dtrain/dtrain.cc b/dtrain/dtrain.cc
deleted file mode 100644
index 18286668..00000000
--- a/dtrain/dtrain.cc
+++ /dev/null
@@ -1,657 +0,0 @@
-#include "dtrain.h"
-
-
-bool
-dtrain_init(int argc, char** argv, po::variables_map* cfg)
-{
-  po::options_description ini("Configuration File Options");
-  ini.add_options()
-    ("input",          po::value<string>()->default_value("-"), "input file")
-    ("output",         po::value<string>()->default_value("-"), "output weights file, '-' for STDOUT")
-    ("input_weights",  po::value<string>(), "input weights file (e.g. 
from previous iteration)") - ("decoder_config", po::value(), "configuration file for cdec") - ("print_weights", po::value(), "weights to print on each iteration") - ("stop_after", po::value()->default_value(0), "stop after X input sentences") - ("tmp", po::value()->default_value("/tmp"), "temp dir to use") - ("keep", po::value()->zero_tokens(), "keep weights files for each iteration") - ("hstreaming", po::value(), "run in hadoop streaming mode, arg is a task id") - ("epochs", po::value()->default_value(10), "# of iterations T (per shard)") - ("k", po::value()->default_value(100), "how many translations to sample") - ("sample_from", po::value()->default_value("kbest"), "where to sample translations from: 'kbest', 'forest'") - ("filter", po::value()->default_value("uniq"), "filter kbest list: 'not', 'uniq'") - ("pair_sampling", po::value()->default_value("XYX"), "how to sample pairs: 'all', 'XYX' or 'PRO'") - ("hi_lo", po::value()->default_value(0.1), "hi and lo (X) for XYX (default 0.1), <= 0.5") - ("pair_threshold", po::value()->default_value(0.), "bleu [0,1] threshold to filter pairs") - ("N", po::value()->default_value(4), "N for Ngrams (BLEU)") - ("scorer", po::value()->default_value("stupid_bleu"), "scoring: bleu, stupid_, smooth_, approx_, lc_") - ("learning_rate", po::value()->default_value(1.0), "learning rate") - ("gamma", po::value()->default_value(0.), "gamma for SVM (0 for perceptron)") - ("select_weights", po::value()->default_value("last"), "output best, last, avg weights ('VOID' to throw away)") - ("rescale", po::value()->zero_tokens(), "rescale weight vector after each input") - ("l1_reg", po::value()->default_value("none"), "apply l1 regularization as in 'Tsuroka et al' (2010)") - ("l1_reg_strength", po::value(), "l1 regularization strength") - ("fselect", po::value()->default_value(-1), "select top x percent (or by threshold) of features after each epoch NOT IMPLEMENTED") // TODO - ("approx_bleu_d", po::value()->default_value(0.9), "discount for approx. BLEU") - ("scale_bleu_diff", po::value()->zero_tokens(), "learning rate <- bleu diff of a misranked pair") - ("loss_margin", po::value()->default_value(0.), "update if no error in pref pair but model scores this near") - ("max_pairs", po::value()->default_value(std::numeric_limits::max()), "max. # of pairs per Sent.") -#ifdef DTRAIN_LOCAL - ("refs,r", po::value(), "references in local mode") -#endif - ("noup", po::value()->zero_tokens(), "do not update weights"); - po::options_description cl("Command Line Options"); - cl.add_options() - ("config,c", po::value(), "dtrain config file") - ("quiet,q", po::value()->zero_tokens(), "be quiet") - ("verbose,v", po::value()->zero_tokens(), "be verbose"); - cl.add(ini); - po::store(parse_command_line(argc, argv, cl), *cfg); - if (cfg->count("config")) { - ifstream ini_f((*cfg)["config"].as().c_str()); - po::store(po::parse_config_file(ini_f, ini), *cfg); - } - po::notify(*cfg); - if (!cfg->count("decoder_config")) { - cerr << cl << endl; - return false; - } - if (cfg->count("hstreaming") && (*cfg)["output"].as() != "-") { - cerr << "When using 'hstreaming' the 'output' param should be '-'." << endl; - return false; - } -#ifdef DTRAIN_LOCAL - if ((*cfg)["input"].as() == "-") { - cerr << "Can't use stdin as input with this binary. 
Recompile without DTRAIN_LOCAL" << endl; - return false; - } -#endif - if ((*cfg)["sample_from"].as() != "kbest" - && (*cfg)["sample_from"].as() != "forest") { - cerr << "Wrong 'sample_from' param: '" << (*cfg)["sample_from"].as() << "', use 'kbest' or 'forest'." << endl; - return false; - } - if ((*cfg)["sample_from"].as() == "kbest" && (*cfg)["filter"].as() != "uniq" && - (*cfg)["filter"].as() != "not") { - cerr << "Wrong 'filter' param: '" << (*cfg)["filter"].as() << "', use 'uniq' or 'not'." << endl; - return false; - } - if ((*cfg)["pair_sampling"].as() != "all" && (*cfg)["pair_sampling"].as() != "XYX" && - (*cfg)["pair_sampling"].as() != "PRO") { - cerr << "Wrong 'pair_sampling' param: '" << (*cfg)["pair_sampling"].as() << "'." << endl; - return false; - } - if(cfg->count("hi_lo") && (*cfg)["pair_sampling"].as() != "XYX") { - cerr << "Warning: hi_lo only works with pair_sampling XYX." << endl; - } - if((*cfg)["hi_lo"].as() > 0.5 || (*cfg)["hi_lo"].as() < 0.01) { - cerr << "hi_lo must lie in [0.01, 0.5]" << endl; - return false; - } - if ((*cfg)["pair_threshold"].as() < 0) { - cerr << "The threshold must be >= 0!" << endl; - return false; - } - if ((*cfg)["select_weights"].as() != "last" && (*cfg)["select_weights"].as() != "best" && - (*cfg)["select_weights"].as() != "avg" && (*cfg)["select_weights"].as() != "VOID") { - cerr << "Wrong 'select_weights' param: '" << (*cfg)["select_weights"].as() << "', use 'last' or 'best'." << endl; - return false; - } - return true; -} - -int -main(int argc, char** argv) -{ - // handle most parameters - po::variables_map cfg; - if (!dtrain_init(argc, argv, &cfg)) exit(1); // something is wrong - bool quiet = false; - if (cfg.count("quiet")) quiet = true; - bool verbose = false; - if (cfg.count("verbose")) verbose = true; - bool noup = false; - if (cfg.count("noup")) noup = true; - bool hstreaming = false; - string task_id; - if (cfg.count("hstreaming")) { - hstreaming = true; - quiet = true; - task_id = cfg["hstreaming"].as(); - cerr.precision(17); - } - bool rescale = false; - if (cfg.count("rescale")) rescale = true; - HSReporter rep(task_id); - bool keep = false; - if (cfg.count("keep")) keep = true; - - const unsigned k = cfg["k"].as(); - const unsigned N = cfg["N"].as(); - const unsigned T = cfg["epochs"].as(); - const unsigned stop_after = cfg["stop_after"].as(); - const string filter_type = cfg["filter"].as(); - const string sample_from = cfg["sample_from"].as(); - const string pair_sampling = cfg["pair_sampling"].as(); - const score_t pair_threshold = cfg["pair_threshold"].as(); - const string select_weights = cfg["select_weights"].as(); - const float hi_lo = cfg["hi_lo"].as(); - const score_t approx_bleu_d = cfg["approx_bleu_d"].as(); - const unsigned max_pairs = cfg["max_pairs"].as(); - weight_t loss_margin = cfg["loss_margin"].as(); - if (loss_margin > 9998.) 
loss_margin = std::numeric_limits::max(); - bool scale_bleu_diff = false; - if (cfg.count("scale_bleu_diff")) scale_bleu_diff = true; - bool average = false; - if (select_weights == "avg") - average = true; - vector print_weights; - if (cfg.count("print_weights")) - boost::split(print_weights, cfg["print_weights"].as(), boost::is_any_of(" ")); - - // setup decoder - register_feature_functions(); - SetSilent(true); - ReadFile ini_rf(cfg["decoder_config"].as()); - if (!quiet) - cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as() << "'" << endl; - Decoder decoder(ini_rf.stream()); - - // scoring metric/scorer - string scorer_str = cfg["scorer"].as(); - LocalScorer* scorer; - if (scorer_str == "bleu") { - scorer = dynamic_cast(new BleuScorer); - } else if (scorer_str == "stupid_bleu") { - scorer = dynamic_cast(new StupidBleuScorer); - } else if (scorer_str == "smooth_bleu") { - scorer = dynamic_cast(new SmoothBleuScorer); - } else if (scorer_str == "sum_bleu") { - scorer = dynamic_cast(new SumBleuScorer); - } else if (scorer_str == "sumexp_bleu") { - scorer = dynamic_cast(new SumExpBleuScorer); - } else if (scorer_str == "sumwhatever_bleu") { - scorer = dynamic_cast(new SumWhateverBleuScorer); - } else if (scorer_str == "approx_bleu") { - scorer = dynamic_cast(new ApproxBleuScorer(N, approx_bleu_d)); - } else if (scorer_str == "lc_bleu") { - scorer = dynamic_cast(new LinearBleuScorer(N)); - } else { - cerr << "Don't know scoring metric: '" << scorer_str << "', exiting." << endl; - exit(1); - } - vector bleu_weights; - scorer->Init(N, bleu_weights); - - // setup decoder observer - MT19937 rng; // random number generator, only for forest sampling - HypSampler* observer; - if (sample_from == "kbest") - observer = dynamic_cast(new KBestGetter(k, filter_type)); - else - observer = dynamic_cast(new KSampler(k, &rng)); - observer->SetScorer(scorer); - - // init weights - vector& dense_weights = decoder.CurrentWeightVector(); - SparseVector lambdas, cumulative_penalties, w_average; - if (cfg.count("input_weights")) Weights::InitFromFile(cfg["input_weights"].as(), &dense_weights); - Weights::InitSparseVector(dense_weights, &lambdas); - - // meta params for perceptron, SVM - weight_t eta = cfg["learning_rate"].as(); - weight_t gamma = cfg["gamma"].as(); - - // l1 regularization - bool l1naive = false; - bool l1clip = false; - bool l1cumul = false; - weight_t l1_reg = 0; - if (cfg["l1_reg"].as() != "none") { - string s = cfg["l1_reg"].as(); - if (s == "naive") l1naive = true; - else if (s == "clip") l1clip = true; - else if (s == "cumul") l1cumul = true; - l1_reg = cfg["l1_reg_strength"].as(); - } - - // output - string output_fn = cfg["output"].as(); - // input - string input_fn = cfg["input"].as(); - ReadFile input(input_fn); - // buffer input for t > 0 - vector src_str_buf; // source strings (decoder takes only strings) - vector > ref_ids_buf; // references as WordID vecs - // where temp files go - string tmp_path = cfg["tmp"].as(); -#ifdef DTRAIN_LOCAL - string refs_fn = cfg["refs"].as(); - ReadFile refs(refs_fn); -#else - string grammar_buf_fn = gettmpf(tmp_path, "dtrain-grammars"); - ogzstream grammar_buf_out; - grammar_buf_out.open(grammar_buf_fn.c_str()); -#endif - - unsigned in_sz = std::numeric_limits::max(); // input index, input size - vector > all_scores; - score_t max_score = 0.; - unsigned best_it = 0; - float overall_time = 0.; - - // output cfg - if (!quiet) { - cerr << _p5; - cerr << endl << "dtrain" << endl << "Parameters:" << endl; - cerr << setw(25) << "k " << k << 
endl; - cerr << setw(25) << "N " << N << endl; - cerr << setw(25) << "T " << T << endl; - cerr << setw(25) << "scorer '" << scorer_str << "'" << endl; - if (scorer_str == "approx_bleu") - cerr << setw(25) << "approx. B discount " << approx_bleu_d << endl; - cerr << setw(25) << "sample from " << "'" << sample_from << "'" << endl; - if (sample_from == "kbest") - cerr << setw(25) << "filter " << "'" << filter_type << "'" << endl; - if (!scale_bleu_diff) cerr << setw(25) << "learning rate " << eta << endl; - else cerr << setw(25) << "learning rate " << "bleu diff" << endl; - cerr << setw(25) << "gamma " << gamma << endl; - cerr << setw(25) << "loss margin " << loss_margin << endl; - cerr << setw(25) << "pairs " << "'" << pair_sampling << "'" << endl; - if (pair_sampling == "XYX") - cerr << setw(25) << "hi lo " << hi_lo << endl; - cerr << setw(25) << "pair threshold " << pair_threshold << endl; - cerr << setw(25) << "select weights " << "'" << select_weights << "'" << endl; - if (cfg.count("l1_reg")) - cerr << setw(25) << "l1 reg " << l1_reg << " '" << cfg["l1_reg"].as() << "'" << endl; - if (rescale) - cerr << setw(25) << "rescale " << rescale << endl; - cerr << setw(25) << "max pairs " << max_pairs << endl; - cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as() << "'" << endl; - cerr << setw(25) << "input " << "'" << input_fn << "'" << endl; -#ifdef DTRAIN_LOCAL - cerr << setw(25) << "refs " << "'" << refs_fn << "'" << endl; -#endif - cerr << setw(25) << "output " << "'" << output_fn << "'" << endl; - if (cfg.count("input_weights")) - cerr << setw(25) << "weights in " << "'" << cfg["input_weights"].as() << "'" << endl; - if (stop_after > 0) - cerr << setw(25) << "stop_after " << stop_after << endl; - if (!verbose) cerr << "(a dot represents " << DTRAIN_DOTS << " inputs)" << endl; - } - - - for (unsigned t = 0; t < T; t++) // T epochs - { - - if (hstreaming) cerr << "reporter:status:Iteration #" << t+1 << " of " << T << endl; - - time_t start, end; - time(&start); -#ifndef DTRAIN_LOCAL - igzstream grammar_buf_in; - if (t > 0) grammar_buf_in.open(grammar_buf_fn.c_str()); -#endif - score_t score_sum = 0.; - score_t model_sum(0); - unsigned ii = 0, rank_errors = 0, margin_violations = 0, npairs = 0, f_count = 0, list_sz = 0; - if (!quiet) cerr << "Iteration #" << t+1 << " of " << T << "." << endl; - - while(true) - { - - string in; - bool next = false, stop = false; // next iteration or premature stop - if (t == 0) { - if(!getline(*input, in)) next = true; - } else { - if (ii == in_sz) next = true; // stop if we reach the end of our input - } - // stop after X sentences (but still go on for those) - if (stop_after > 0 && stop_after == ii && !next) stop = true; - - // produce some pretty output - if (!quiet && !verbose) { - if (ii == 0) cerr << " "; - if ((ii+1) % (DTRAIN_DOTS) == 0) { - cerr << "."; - cerr.flush(); - } - if ((ii+1) % (20*DTRAIN_DOTS) == 0) { - cerr << " " << ii+1 << endl; - if (!next && !stop) cerr << " "; - } - if (stop) { - if (ii % (20*DTRAIN_DOTS) != 0) cerr << " " << ii << endl; - cerr << "Stopping after " << stop_after << " input sentences." 
<< endl; - } else { - if (next) { - if (ii % (20*DTRAIN_DOTS) != 0) cerr << " " << ii << endl; - } - } - } - - // next iteration - if (next || stop) break; - - // weights - lambdas.init_vector(&dense_weights); - - // getting input - vector ref_ids; // reference as vector -#ifndef DTRAIN_LOCAL - vector in_split; // input: sid\tsrc\tref\tpsg - if (t == 0) { - // handling input - split_in(in, in_split); - if (hstreaming && ii == 0) cerr << "reporter:counter:" << task_id << ",First ID," << in_split[0] << endl; - // getting reference - vector ref_tok; - boost::split(ref_tok, in_split[2], boost::is_any_of(" ")); - register_and_convert(ref_tok, ref_ids); - ref_ids_buf.push_back(ref_ids); - // process and set grammar - bool broken_grammar = true; // ignore broken grammars - for (string::iterator it = in.begin(); it != in.end(); it++) { - if (!isspace(*it)) { - broken_grammar = false; - break; - } - } - if (broken_grammar) { - cerr << "Broken grammar for " << ii+1 << "! Ignoring this input." << endl; - continue; - } - boost::replace_all(in, "\t", "\n"); - in += "\n"; - grammar_buf_out << in << DTRAIN_GRAMMAR_DELIM << " " << in_split[0] << endl; - decoder.AddSupplementalGrammarFromString(in); - src_str_buf.push_back(in_split[1]); - // decode - observer->SetRef(ref_ids); - decoder.Decode(in_split[1], observer); - } else { - // get buffered grammar - string grammar_str; - while (true) { - string rule; - getline(grammar_buf_in, rule); - if (boost::starts_with(rule, DTRAIN_GRAMMAR_DELIM)) break; - grammar_str += rule + "\n"; - } - decoder.AddSupplementalGrammarFromString(grammar_str); - // decode - observer->SetRef(ref_ids_buf[ii]); - decoder.Decode(src_str_buf[ii], observer); - } -#else - if (t == 0) { - string r_; - getline(*refs, r_); - vector ref_tok; - boost::split(ref_tok, r_, boost::is_any_of(" ")); - register_and_convert(ref_tok, ref_ids); - ref_ids_buf.push_back(ref_ids); - src_str_buf.push_back(in); - } else { - ref_ids = ref_ids_buf[ii]; - } - observer->SetRef(ref_ids); - if (t == 0) - decoder.Decode(in, observer); - else - decoder.Decode(src_str_buf[ii], observer); -#endif - - // get (scored) samples - vector* samples = observer->GetSamples(); - - if (verbose) { - cerr << "--- ref for " << ii << ": "; - if (t > 0) printWordIDVec(ref_ids_buf[ii]); - else printWordIDVec(ref_ids); - cerr << endl; - for (unsigned u = 0; u < samples->size(); u++) { - cerr << _p2 << _np << "[" << u << ". 
'"; - printWordIDVec((*samples)[u].w); - cerr << "'" << endl; - cerr << "SCORE=" << (*samples)[u].score << ",model="<< (*samples)[u].model << endl; - cerr << "F{" << (*samples)[u].f << "} ]" << endl << endl; - } - } - - score_sum += (*samples)[0].score; // stats for 1best - model_sum += (*samples)[0].model; - - f_count += observer->get_f_count(); - list_sz += observer->get_sz(); - - // weight updates - if (!noup) { - // get pairs - vector > pairs; - if (pair_sampling == "all") - all_pairs(samples, pairs, pair_threshold, max_pairs); - if (pair_sampling == "XYX") - partXYX(samples, pairs, pair_threshold, max_pairs, hi_lo); - if (pair_sampling == "PRO") - PROsampling(samples, pairs, pair_threshold, max_pairs); - npairs += pairs.size(); - - for (vector >::iterator it = pairs.begin(); - it != pairs.end(); it++) { -#ifdef DTRAIN_FASTER_PERCEPTRON - bool rank_error = true; // pair sampling already did this for us - rank_errors++; - score_t margin = std::numeric_limits::max(); -#else - bool rank_error = it->first.model <= it->second.model; - if (rank_error) rank_errors++; - score_t margin = fabs(fabs(it->first.model) - fabs(it->second.model)); - if (!rank_error && margin < loss_margin) margin_violations++; -#endif - if (scale_bleu_diff) eta = it->first.score - it->second.score; - if (rank_error || margin < loss_margin) { - SparseVector diff_vec = it->first.f - it->second.f; - lambdas.plus_eq_v_times_s(diff_vec, eta); - if (gamma) - lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs)); - } - } - - // l1 regularization - if (l1naive) { - for (unsigned d = 0; d < lambdas.size(); d++) { - weight_t v = lambdas.get(d); - lambdas.set_value(d, v - sign(v) * l1_reg); - } - } else if (l1clip) { - for (unsigned d = 0; d < lambdas.size(); d++) { - if (lambdas.nonzero(d)) { - weight_t v = lambdas.get(d); - if (v > 0) { - lambdas.set_value(d, max(0., v - l1_reg)); - } else { - lambdas.set_value(d, min(0., v + l1_reg)); - } - } - } - } else if (l1cumul) { - weight_t acc_penalty = (ii+1) * l1_reg; // ii is the index of the current input - for (unsigned d = 0; d < lambdas.size(); d++) { - if (lambdas.nonzero(d)) { - weight_t v = lambdas.get(d); - weight_t penalty = 0; - if (v > 0) { - penalty = max(0., v-(acc_penalty + cumulative_penalties.get(d))); - } else { - penalty = min(0., v+(acc_penalty - cumulative_penalties.get(d))); - } - lambdas.set_value(d, penalty); - cumulative_penalties.set_value(d, cumulative_penalties.get(d)+penalty); - } - } - } - - } - - if (rescale) lambdas /= lambdas.l2norm(); - - ++ii; - - if (hstreaming) { - rep.update_counter("Seen #"+boost::lexical_cast(t+1), 1u); - rep.update_counter("Seen", 1u); - } - - } // input loop - - if (average) w_average += lambdas; - - if (scorer_str == "approx_bleu" || scorer_str == "lc_bleu") scorer->Reset(); - - if (t == 0) { - in_sz = ii; // remember size of input (# lines) - if (hstreaming) { - rep.update_counter("|Input|", ii); - rep.update_gcounter("|Input|", ii); - rep.update_gcounter("Shards", 1u); - } - } - -#ifndef DTRAIN_LOCAL - if (t == 0) { - grammar_buf_out.close(); - } else { - grammar_buf_in.close(); - } -#endif - - // print some stats - score_t score_avg = score_sum/(score_t)in_sz; - score_t model_avg = model_sum/(score_t)in_sz; - score_t score_diff, model_diff; - if (t > 0) { - score_diff = score_avg - all_scores[t-1].first; - model_diff = model_avg - all_scores[t-1].second; - } else { - score_diff = score_avg; - model_diff = model_avg; - } - - unsigned nonz = 0; - if (!quiet || hstreaming) nonz = (unsigned)lambdas.num_nonzero(); - - 
if (!quiet) { - cerr << _p5 << _p << "WEIGHTS" << endl; - for (vector::iterator it = print_weights.begin(); it != print_weights.end(); it++) { - cerr << setw(18) << *it << " = " << lambdas.get(FD::Convert(*it)) << endl; - } - cerr << " ---" << endl; - cerr << _np << " 1best avg score: " << score_avg; - cerr << _p << " (" << score_diff << ")" << endl; - cerr << _np << " 1best avg model score: " << model_avg; - cerr << _p << " (" << model_diff << ")" << endl; - cerr << " avg # pairs: "; - cerr << _np << npairs/(float)in_sz << endl; - cerr << " avg # rank err: "; - cerr << rank_errors/(float)in_sz << endl; -#ifndef DTRAIN_FASTER_PERCEPTRON - cerr << " avg # margin viol: "; - cerr << margin_violations/(float)in_sz << endl; -#endif - cerr << " non0 feature count: " << nonz << endl; - cerr << " avg list sz: " << list_sz/(float)in_sz << endl; - cerr << " avg f count: " << f_count/(float)list_sz << endl; - } - - if (hstreaming) { - rep.update_counter("Score 1best avg #"+boost::lexical_cast(t+1), (unsigned)(score_avg*DTRAIN_SCALE)); - rep.update_counter("Model 1best avg #"+boost::lexical_cast(t+1), (unsigned)(model_avg*DTRAIN_SCALE)); - rep.update_counter("Pairs avg #"+boost::lexical_cast(t+1), (unsigned)((npairs/(weight_t)in_sz)*DTRAIN_SCALE)); - rep.update_counter("Rank errors avg #"+boost::lexical_cast(t+1), (unsigned)((rank_errors/(weight_t)in_sz)*DTRAIN_SCALE)); - rep.update_counter("Margin violations avg #"+boost::lexical_cast(t+1), (unsigned)((margin_violations/(weight_t)in_sz)*DTRAIN_SCALE)); - rep.update_counter("Non zero feature count #"+boost::lexical_cast(t+1), nonz); - rep.update_gcounter("Non zero feature count #"+boost::lexical_cast(t+1), nonz); - } - - pair remember; - remember.first = score_avg; - remember.second = model_avg; - all_scores.push_back(remember); - if (score_avg > max_score) { - max_score = score_avg; - best_it = t; - } - time (&end); - float time_diff = difftime(end, start); - overall_time += time_diff; - if (!quiet) { - cerr << _p2 << _np << "(time " << time_diff/60. << " min, "; - cerr << time_diff/in_sz << " s/S)" << endl; - } - if (t+1 != T && !quiet) cerr << endl; - - if (noup) break; - - // write weights to file - if (select_weights == "best" || keep) { - lambdas.init_vector(&dense_weights); - string w_fn = "weights." + boost::lexical_cast(t) + ".gz"; - Weights::WriteToFile(w_fn, dense_weights, true); - } - - } // outer loop - - if (average) w_average /= (weight_t)T; - -#ifndef DTRAIN_LOCAL - unlink(grammar_buf_fn.c_str()); -#endif - - if (!noup) { - if (!quiet) cerr << endl << "Writing weights file to '" << output_fn << "' ..." 
<< endl; - if (select_weights == "last" || average) { // last, average - WriteFile of(output_fn); // works with '-' - ostream& o = *of.stream(); - o.precision(17); - o << _np; - if (average) { - for (SparseVector::iterator it = w_average.begin(); it != w_average.end(); ++it) { - if (it->second == 0) continue; - o << FD::Convert(it->first) << '\t' << it->second << endl; - } - } else { - for (SparseVector::iterator it = lambdas.begin(); it != lambdas.end(); ++it) { - if (it->second == 0) continue; - o << FD::Convert(it->first) << '\t' << it->second << endl; - } - } - } else if (select_weights == "VOID") { // do nothing with the weights - } else { // best - if (output_fn != "-") { - CopyFile("weights."+boost::lexical_cast(best_it)+".gz", output_fn); - } else { - ReadFile bestw("weights."+boost::lexical_cast(best_it)+".gz"); - string o; - cout.precision(17); - cout << _np; - while(getline(*bestw, o)) cout << o << endl; - } - if (!keep) { - for (unsigned i = 0; i < T; i++) { - string s = "weights." + boost::lexical_cast(i) + ".gz"; - unlink(s.c_str()); - } - } - } - if (output_fn == "-" && hstreaming) cout << "__SHARD_COUNT__\t1" << endl; - if (!quiet) cerr << "done" << endl; - } - - if (!quiet) { - cerr << _p5 << _np << endl << "---" << endl << "Best iteration: "; - cerr << best_it+1 << " [SCORE '" << scorer_str << "'=" << max_score << "]." << endl; - cerr << "This took " << overall_time/60. << " min." << endl; - } -} - diff --git a/dtrain/dtrain.h b/dtrain/dtrain.h deleted file mode 100644 index 4b6f415c..00000000 --- a/dtrain/dtrain.h +++ /dev/null @@ -1,97 +0,0 @@ -#ifndef _DTRAIN_H_ -#define _DTRAIN_H_ - -#undef DTRAIN_FASTER_PERCEPTRON // only look at misranked pairs - // DO NOT USE WITH SVM! -//#define DTRAIN_LOCAL -#define DTRAIN_DOTS 10 // after how many inputs to display a '.' -#define DTRAIN_GRAMMAR_DELIM "########EOS########" -#define DTRAIN_SCALE 100000 - - -#include -#include -#include - -#include -#include - -#include "ksampler.h" -#include "pairsampling.h" - -#include "filelib.h" - - -using namespace std; -using namespace dtrain; -namespace po = boost::program_options; - -inline void register_and_convert(const vector& strs, vector& ids) -{ - vector::const_iterator it; - for (it = strs.begin(); it < strs.end(); it++) - ids.push_back(TD::Convert(*it)); -} - -inline string gettmpf(const string path, const string infix) -{ - char fn[path.size() + infix.size() + 8]; - strcpy(fn, path.c_str()); - strcat(fn, "/"); - strcat(fn, infix.c_str()); - strcat(fn, "-XXXXXX"); - if (!mkstemp(fn)) { - cerr << "Cannot make temp file in" << path << " , exiting." 
<< endl; - exit(1); - } - return string(fn); -} - -inline void split_in(string& s, vector& parts) -{ - unsigned f = 0; - for(unsigned i = 0; i < 3; i++) { - unsigned e = f; - f = s.find("\t", f+1); - if (e != 0) parts.push_back(s.substr(e+1, f-e-1)); - else parts.push_back(s.substr(0, f)); - } - s.erase(0, f+1); -} - -struct HSReporter -{ - string task_id_; - - HSReporter(string task_id) : task_id_(task_id) {} - - inline void update_counter(string name, unsigned amount) { - cerr << "reporter:counter:" << task_id_ << "," << name << "," << amount << endl; - } - inline void update_gcounter(string name, unsigned amount) { - cerr << "reporter:counter:Global," << name << "," << amount << endl; - } -}; - -inline ostream& _np(ostream& out) { return out << resetiosflags(ios::showpos); } -inline ostream& _p(ostream& out) { return out << setiosflags(ios::showpos); } -inline ostream& _p2(ostream& out) { return out << setprecision(2); } -inline ostream& _p5(ostream& out) { return out << setprecision(5); } - -inline void printWordIDVec(vector& v) -{ - for (unsigned i = 0; i < v.size(); i++) { - cerr << TD::Convert(v[i]); - if (i < v.size()-1) cerr << " "; - } -} - -template -inline T sign(T z) -{ - if (z == 0) return 0; - return z < 0 ? -1 : +1; -} - -#endif - diff --git a/dtrain/hstreaming/avg.rb b/dtrain/hstreaming/avg.rb deleted file mode 100755 index 2599c732..00000000 --- a/dtrain/hstreaming/avg.rb +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env ruby -# first arg may be an int of custom shard count - -shard_count_key = "__SHARD_COUNT__" - -STDIN.set_encoding 'utf-8' -STDOUT.set_encoding 'utf-8' - -w = {} -c = {} -w.default = 0 -c.default = 0 -while line = STDIN.gets - key, val = line.split /\s/ - w[key] += val.to_f - c[key] += 1 -end - -if ARGV.size == 0 - shard_count = w["__SHARD_COUNT__"] -else - shard_count = ARGV[0].to_f -end -w.each_key { |k| - if k == shard_count_key - next - else - puts "#{k}\t#{w[k]/shard_count}" - #puts "# #{c[k]}" - end -} - diff --git a/dtrain/hstreaming/cdec.ini b/dtrain/hstreaming/cdec.ini deleted file mode 100644 index d4f5cecd..00000000 --- a/dtrain/hstreaming/cdec.ini +++ /dev/null @@ -1,22 +0,0 @@ -formalism=scfg -add_pass_through_rules=true -scfg_max_span_limit=15 -intersection_strategy=cube_pruning -cubepruning_pop_limit=30 -feature_function=WordPenalty -feature_function=KLanguageModel nc-wmt11.en.srilm.gz -#feature_function=ArityPenalty -#feature_function=CMR2008ReorderingFeatures -#feature_function=Dwarf -#feature_function=InputIndicator -#feature_function=LexNullJump -#feature_function=NewJump -#feature_function=NgramFeatures -#feature_function=NonLatinCount -#feature_function=OutputIndicator -#feature_function=RuleIdentityFeatures -#feature_function=RuleNgramFeatures -#feature_function=RuleShape -#feature_function=SourceSpanSizeFeatures -#feature_function=SourceWordPenalty -#feature_function=SpanFeatures diff --git a/dtrain/hstreaming/dtrain.ini b/dtrain/hstreaming/dtrain.ini deleted file mode 100644 index a2c219a1..00000000 --- a/dtrain/hstreaming/dtrain.ini +++ /dev/null @@ -1,15 +0,0 @@ -input=- -output=- -decoder_config=cdec.ini -tmp=/var/hadoop/mapred/local/ -epochs=1 -k=100 -N=4 -learning_rate=0.0001 -gamma=0 -scorer=stupid_bleu -sample_from=kbest -filter=uniq -pair_sampling=XYX -pair_threshold=0 -select_weights=last diff --git a/dtrain/hstreaming/dtrain.sh b/dtrain/hstreaming/dtrain.sh deleted file mode 100755 index 877ff94c..00000000 --- a/dtrain/hstreaming/dtrain.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -# script to run dtrain with a task id - 
-pushd . &>/dev/null -cd .. -ID=$(basename $(pwd)) # attempt_... -popd &>/dev/null -./dtrain -c dtrain.ini --hstreaming $ID - diff --git a/dtrain/hstreaming/hadoop-streaming-job.sh b/dtrain/hstreaming/hadoop-streaming-job.sh deleted file mode 100755 index 92419956..00000000 --- a/dtrain/hstreaming/hadoop-streaming-job.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh - -EXP=a_simple_test - -# change these vars to fit your hadoop installation -HADOOP_HOME=/usr/lib/hadoop-0.20 -JAR=contrib/streaming/hadoop-streaming-0.20.2-cdh3u1.jar -HSTREAMING="$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/$JAR" - - IN=input_on_hdfs -OUT=output_weights_on_hdfs - -# you can -reducer to NONE if you want to -# do feature selection/averaging locally (e.g. to -# keep weights of all epochs) -$HSTREAMING \ - -mapper "dtrain.sh" \ - -reducer "ruby lplp.rb l2 select_k 100000" \ - -input $IN \ - -output $OUT \ - -file dtrain.sh \ - -file lplp.rb \ - -file ../dtrain \ - -file dtrain.ini \ - -file cdec.ini \ - -file ../test/example/nc-wmt11.en.srilm.gz \ - -jobconf mapred.reduce.tasks=30 \ - -jobconf mapred.max.map.failures.percent=0 \ - -jobconf mapred.job.name="dtrain $EXP" - diff --git a/dtrain/hstreaming/lplp.rb b/dtrain/hstreaming/lplp.rb deleted file mode 100755 index f0cd58c5..00000000 --- a/dtrain/hstreaming/lplp.rb +++ /dev/null @@ -1,131 +0,0 @@ -# lplp.rb - -# norms -def l0(feature_column, n) - if feature_column.size >= n then return 1 else return 0 end -end - -def l1(feature_column, n=-1) - return feature_column.map { |i| i.abs }.reduce { |sum,i| sum+i } -end - -def l2(feature_column, n=-1) - return Math.sqrt feature_column.map { |i| i.abs2 }.reduce { |sum,i| sum+i } -end - -def linfty(feature_column, n=-1) - return feature_column.map { |i| i.abs }.max -end - -# stats -def median(feature_column, n) - return feature_column.concat(0.step(n-feature_column.size-1).map{|i|0}).sort[feature_column.size/2] -end - -def mean(feature_column, n) - return feature_column.reduce { |sum, i| sum+i } / n -end - -# selection -def select_k(weights, norm_fun, n, k=10000) - weights.sort{|a,b| norm_fun.call(b[1], n) <=> norm_fun.call(a[1], n)}.each { |p| - puts "#{p[0]}\t#{mean(p[1], n)}" - k -= 1 - if k == 0 then break end - } -end - -def cut(weights, norm_fun, n, epsilon=0.0001) - weights.each { |k,v| - if norm_fun.call(v, n).abs >= epsilon - puts "#{k}\t#{mean(v, n)}" - end - } -end - -# test -def _test() - puts - w = {} - w["a"] = [1, 2, 3] - w["b"] = [1, 2] - w["c"] = [66] - w["d"] = [10, 20, 30] - n = 3 - puts w.to_s - puts - puts "select_k" - puts "l0 expect ad" - select_k(w, method(:l0), n, 2) - puts "l1 expect cd" - select_k(w, method(:l1), n, 2) - puts "l2 expect c" - select_k(w, method(:l2), n, 1) - puts - puts "cut" - puts "l1 expect cd" - cut(w, method(:l1), n, 7) - puts - puts "median" - a = [1,2,3,4,5] - puts a.to_s - puts median(a, 5) - puts - puts "#{median(a, 7)} <- that's because we add missing 0s:" - puts a.concat(0.step(7-a.size-1).map{|i|0}).to_s - puts - puts "mean expect bc" - w.clear - w["a"] = [2] - w["b"] = [2.1] - w["c"] = [2.2] - cut(w, method(:mean), 1, 2.05) - exit -end -#_test() - -# actually do something -def usage() - puts "lplp.rb [n] < " - puts " l0...: norms for selection" - puts "select_k: only output top k (according to the norm of their column vector) features" - puts " cut: output features with weight >= threshold" - puts " n: if we do not have a shard count use this number for averaging" - exit -end - -if ARGV.size < 3 then usage end -norm_fun = method(ARGV[0].to_sym) -type = ARGV[1] -x = 
ARGV[2].to_f - -shard_count_key = "__SHARD_COUNT__" - -STDIN.set_encoding 'utf-8' -STDOUT.set_encoding 'utf-8' - -w = {} -shard_count = 0 -while line = STDIN.gets - key, val = line.split /\s+/ - if key == shard_count_key - shard_count += 1 - next - end - if w.has_key? key - w[key].push val.to_f - else - w[key] = [val.to_f] - end -end - -if ARGV.size == 4 then shard_count = ARGV[3].to_f end - -if type == 'cut' - cut(w, norm_fun, shard_count, x) -elsif type == 'select_k' - select_k(w, norm_fun, shard_count, x) -else - puts "oh oh" -end - diff --git a/dtrain/hstreaming/red-test b/dtrain/hstreaming/red-test deleted file mode 100644 index 2623d697..00000000 --- a/dtrain/hstreaming/red-test +++ /dev/null @@ -1,9 +0,0 @@ -a 1 -b 2 -c 3.5 -a 1 -b 2 -c 3.5 -d 1 -e 2 -__SHARD_COUNT__ 2 diff --git a/dtrain/kbestget.h b/dtrain/kbestget.h deleted file mode 100644 index dd8882e1..00000000 --- a/dtrain/kbestget.h +++ /dev/null @@ -1,152 +0,0 @@ -#ifndef _DTRAIN_KBESTGET_H_ -#define _DTRAIN_KBESTGET_H_ - -#include "kbest.h" // cdec -#include "sentence_metadata.h" - -#include "verbose.h" -#include "viterbi.h" -#include "ff_register.h" -#include "decoder.h" -#include "weights.h" -#include "logval.h" - -using namespace std; - -namespace dtrain -{ - - -typedef double score_t; - -struct ScoredHyp -{ - vector w; - SparseVector f; - score_t model; - score_t score; - unsigned rank; -}; - -struct LocalScorer -{ - unsigned N_; - vector w_; - - virtual score_t - Score(vector& hyp, vector& ref, const unsigned rank, const unsigned src_len)=0; - - void Reset() {} // only for approx bleu - - inline void - Init(unsigned N, vector weights) - { - assert(N > 0); - N_ = N; - if (weights.empty()) for (unsigned i = 0; i < N_; i++) w_.push_back(1./N_); - else w_ = weights; - } - - inline score_t - brevity_penalty(const unsigned hyp_len, const unsigned ref_len) - { - if (hyp_len > ref_len) return 1; - return exp(1 - (score_t)ref_len/hyp_len); - } -}; - -struct HypSampler : public DecoderObserver -{ - LocalScorer* scorer_; - vector* ref_; - unsigned f_count_, sz_; - virtual vector* GetSamples()=0; - inline void SetScorer(LocalScorer* scorer) { scorer_ = scorer; } - inline void SetRef(vector& ref) { ref_ = &ref; } - inline unsigned get_f_count() { return f_count_; } - inline unsigned get_sz() { return sz_; } -}; -//////////////////////////////////////////////////////////////////////////////// - - - - -struct KBestGetter : public HypSampler -{ - const unsigned k_; - const string filter_type_; - vector s_; - unsigned src_len_; - - KBestGetter(const unsigned k, const string filter_type) : - k_(k), filter_type_(filter_type) {} - - virtual void - NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) - { - src_len_ = smeta.GetSourceLength(); - KBestScored(*hg); - } - - vector* GetSamples() { return &s_; } - - void - KBestScored(const Hypergraph& forest) - { - if (filter_type_ == "uniq") { - KBestUnique(forest); - } else if (filter_type_ == "not") { - KBestNoFilter(forest); - } - } - - void - KBestUnique(const Hypergraph& forest) - { - s_.clear(); sz_ = f_count_ = 0; - KBest::KBestDerivations, ESentenceTraversal, - KBest::FilterUnique, prob_t, EdgeProb> kbest(forest, k_); - for (unsigned i = 0; i < k_; ++i) { - const KBest::KBestDerivations, ESentenceTraversal, KBest::FilterUnique, - prob_t, EdgeProb>::Derivation* d = - kbest.LazyKthBest(forest.nodes_.size() - 1, i); - if (!d) break; - ScoredHyp h; - h.w = d->yield; - h.f = d->feature_values; - h.model = log(d->score); - h.rank = i; - h.score = scorer_->Score(h.w, *ref_, 
i, src_len_); - s_.push_back(h); - sz_++; - f_count_ += h.f.size(); - } - } - - void - KBestNoFilter(const Hypergraph& forest) - { - s_.clear(); sz_ = f_count_ = 0; - KBest::KBestDerivations, ESentenceTraversal> kbest(forest, k_); - for (unsigned i = 0; i < k_; ++i) { - const KBest::KBestDerivations, ESentenceTraversal>::Derivation* d = - kbest.LazyKthBest(forest.nodes_.size() - 1, i); - if (!d) break; - ScoredHyp h; - h.w = d->yield; - h.f = d->feature_values; - h.model = log(d->score); - h.rank = i; - h.score = scorer_->Score(h.w, *ref_, i, src_len_); - s_.push_back(h); - sz_++; - f_count_ += h.f.size(); - } - } -}; - - -} // namespace - -#endif - diff --git a/dtrain/ksampler.h b/dtrain/ksampler.h deleted file mode 100644 index bc2f56cd..00000000 --- a/dtrain/ksampler.h +++ /dev/null @@ -1,61 +0,0 @@ -#ifndef _DTRAIN_KSAMPLER_H_ -#define _DTRAIN_KSAMPLER_H_ - -#include "hg_sampler.h" // cdec -#include "kbestget.h" -#include "score.h" - -namespace dtrain -{ - -bool -cmp_hyp_by_model_d(ScoredHyp a, ScoredHyp b) -{ - return a.model > b.model; -} - -struct KSampler : public HypSampler -{ - const unsigned k_; - vector s_; - MT19937* prng_; - score_t (*scorer)(NgramCounts&, const unsigned, const unsigned, unsigned, vector); - unsigned src_len_; - - explicit KSampler(const unsigned k, MT19937* prng) : - k_(k), prng_(prng) {} - - virtual void - NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) - { - src_len_ = smeta.GetSourceLength(); - ScoredSamples(*hg); - } - - vector* GetSamples() { return &s_; } - - void ScoredSamples(const Hypergraph& forest) { - s_.clear(); sz_ = f_count_ = 0; - std::vector samples; - HypergraphSampler::sample_hypotheses(forest, k_, prng_, &samples); - for (unsigned i = 0; i < k_; ++i) { - ScoredHyp h; - h.w = samples[i].words; - h.f = samples[i].fmap; - h.model = log(samples[i].model_score); - h.rank = i; - h.score = scorer_->Score(h.w, *ref_, i, src_len_); - s_.push_back(h); - sz_++; - f_count_ += h.f.size(); - } - sort(s_.begin(), s_.end(), cmp_hyp_by_model_d); - for (unsigned i = 0; i < s_.size(); i++) s_[i].rank = i; - } -}; - - -} // namespace - -#endif - diff --git a/dtrain/pairsampling.h b/dtrain/pairsampling.h deleted file mode 100644 index 84be1efb..00000000 --- a/dtrain/pairsampling.h +++ /dev/null @@ -1,149 +0,0 @@ -#ifndef _DTRAIN_PAIRSAMPLING_H_ -#define _DTRAIN_PAIRSAMPLING_H_ - -namespace dtrain -{ - - -bool -accept_pair(score_t a, score_t b, score_t threshold) -{ - if (fabs(a - b) < threshold) return false; - return true; -} - -bool -cmp_hyp_by_score_d(ScoredHyp a, ScoredHyp b) -{ - return a.score > b.score; -} - -inline void -all_pairs(vector* s, vector >& training, score_t threshold, unsigned max, float _unused=1) -{ - sort(s->begin(), s->end(), cmp_hyp_by_score_d); - unsigned sz = s->size(); - bool b = false; - unsigned count = 0; - for (unsigned i = 0; i < sz-1; i++) { - for (unsigned j = i+1; j < sz; j++) { - if (threshold > 0) { - if (accept_pair((*s)[i].score, (*s)[j].score, threshold)) - training.push_back(make_pair((*s)[i], (*s)[j])); - } else { - if ((*s)[i].score != (*s)[j].score) - training.push_back(make_pair((*s)[i], (*s)[j])); - } - if (++count == max) { - b = true; - break; - } - } - if (b) break; - } -} - -/* - * multipartite ranking - * sort (descending) by bleu - * compare top X to middle Y and low X - * cmp middle Y to low X - */ - -inline void -partXYX(vector* s, vector >& training, score_t threshold, unsigned max, float hi_lo) -{ - unsigned sz = s->size(); - if (sz < 2) return; - sort(s->begin(), s->end(), 
cmp_hyp_by_score_d); - unsigned sep = round(sz*hi_lo); - unsigned sep_hi = sep; - if (sz > 4) while (sep_hi < sz && (*s)[sep_hi-1].score == (*s)[sep_hi].score) ++sep_hi; - else sep_hi = 1; - bool b = false; - unsigned count = 0; - for (unsigned i = 0; i < sep_hi; i++) { - for (unsigned j = sep_hi; j < sz; j++) { -#ifdef DTRAIN_FASTER_PERCEPTRON - if ((*s)[i].model <= (*s)[j].model) { -#endif - if (threshold > 0) { - if (accept_pair((*s)[i].score, (*s)[j].score, threshold)) - training.push_back(make_pair((*s)[i], (*s)[j])); - } else { - if ((*s)[i].score != (*s)[j].score) - training.push_back(make_pair((*s)[i], (*s)[j])); - } - if (++count == max) { - b = true; - break; - } -#ifdef DTRAIN_FASTER_PERCEPTRON - } -#endif - } - if (b) break; - } - unsigned sep_lo = sz-sep; - while (sep_lo > 0 && (*s)[sep_lo-1].score == (*s)[sep_lo].score) --sep_lo; - for (unsigned i = sep_hi; i < sz-sep_lo; i++) { - for (unsigned j = sz-sep_lo; j < sz; j++) { -#ifdef DTRAIN_FASTER_PERCEPTRON - if ((*s)[i].model <= (*s)[j].model) { -#endif - if (threshold > 0) { - if (accept_pair((*s)[i].score, (*s)[j].score, threshold)) - training.push_back(make_pair((*s)[i], (*s)[j])); - } else { - if ((*s)[i].score != (*s)[j].score) - training.push_back(make_pair((*s)[i], (*s)[j])); - } - if (++count == max) return; -#ifdef DTRAIN_FASTER_PERCEPTRON - } -#endif - } - } -} - -/* - * pair sampling as in - * 'Tuning as Ranking' (Hopkins & May, 2011) - * count = 5000 - * threshold = 5% BLEU (0.05 for param 3) - * cut = top 50 - */ -bool -_PRO_cmp_pair_by_diff_d(pair a, pair b) -{ - return (fabs(a.first.score - a.second.score)) > (fabs(b.first.score - b.second.score)); -} -inline void -PROsampling(vector* s, vector >& training, score_t threshold, unsigned max, float _unused=1) -{ - unsigned max_count = 5000, count = 0, sz = s->size(); - bool b = false; - for (unsigned i = 0; i < sz-1; i++) { - for (unsigned j = i+1; j < sz; j++) { - if (accept_pair((*s)[i].score, (*s)[j].score, threshold)) { - training.push_back(make_pair((*s)[i], (*s)[j])); - if (++count == max_count) { - b = true; - break; - } - } - } - if (b) break; - } - if (training.size() > 50) { - sort(training.begin(), training.end(), _PRO_cmp_pair_by_diff_d); - training.erase(training.begin()+50, training.end()); - } - return; -} - - -} // namespace - -#endif - diff --git a/dtrain/parallelize.rb b/dtrain/parallelize.rb deleted file mode 100755 index 1d277ff6..00000000 --- a/dtrain/parallelize.rb +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env ruby - - -if ARGV.size != 5 - STDERR.write "Usage: " - STDERR.write "ruby parallelize.rb <#shards> \n" - exit -end - -dtrain_bin = '/home/pks/bin/dtrain_local' -ruby = '/usr/bin/ruby' -lplp_rb = '/home/pks/mt/cdec-dtrain/dtrain/hstreaming/lplp.rb' -lplp_args = 'l2 select_k 100000' -gzip = '/bin/gzip' - -num_shards = ARGV[0].to_i -input = ARGV[1] -refs = ARGV[2] -epochs = ARGV[3].to_i -ini = ARGV[4] - - -`mkdir work` - -def make_shards(input, refs, num_shards) - lc = `wc -l #{input}`.split.first.to_i - shard_sz = lc / num_shards - leftover = lc % num_shards - in_f = File.new input, 'r' - refs_f = File.new refs, 'r' - shard_in_files = [] - shard_refs_files = [] - 0.upto(num_shards-1) { |shard| - shard_in = File.new "work/shard.#{shard}.in", 'w+' - shard_refs = File.new "work/shard.#{shard}.refs", 'w+' - 0.upto(shard_sz-1) { |i| - shard_in.write in_f.gets - shard_refs.write refs_f.gets - } - shard_in_files << shard_in - shard_refs_files << shard_refs - } - while leftover > 0 - shard_in_files[-1].write in_f.gets - 
shard_refs_files[-1].write refs_f.gets - leftover -= 1 - end - (shard_in_files + shard_refs_files).each do |f| f.close end - in_f.close - refs_f.close -end - -make_shards input, refs, num_shards - -0.upto(epochs-1) { |epoch| - pids = [] - input_weights = '' - if epoch > 0 then input_weights = "--input_weights work/weights.#{epoch-1}" end - weights_files = [] - 0.upto(num_shards-1) { |shard| - pids << Kernel.fork { - `#{dtrain_bin} -c #{ini}\ - --input work/shard.#{shard}.in\ - --refs work/shard.#{shard}.refs #{input_weights}\ - --output work/weights.#{shard}.#{epoch}\ - &> work/out.#{shard}.#{epoch}` - } - weights_files << "work/weights.#{shard}.#{epoch}" - } - pids.each { |pid| Process.wait(pid) } - cat = File.new('work/weights_cat', 'w+') - weights_files.each { |f| cat.write File.new(f, 'r').read } - cat.close - `#{ruby} #{lplp_rb} #{lplp_args} #{num_shards} < work/weights_cat &> work/weights.#{epoch}` -} - -`rm work/weights_cat` -`#{gzip} work/*` - diff --git a/dtrain/parallelize/test/cdec.ini b/dtrain/parallelize/test/cdec.ini deleted file mode 100644 index 72e99dc5..00000000 --- a/dtrain/parallelize/test/cdec.ini +++ /dev/null @@ -1,22 +0,0 @@ -formalism=scfg -add_pass_through_rules=true -intersection_strategy=cube_pruning -cubepruning_pop_limit=200 -scfg_max_span_limit=15 -feature_function=WordPenalty -feature_function=KLanguageModel /stor/dat/wmt12/en/news_only/m/wmt12.news.en.3.kenv5 -#feature_function=ArityPenalty -#feature_function=CMR2008ReorderingFeatures -#feature_function=Dwarf -#feature_function=InputIndicator -#feature_function=LexNullJump -#feature_function=NewJump -#feature_function=NgramFeatures -#feature_function=NonLatinCount -#feature_function=OutputIndicator -#feature_function=RuleIdentityFeatures -#feature_function=RuleNgramFeatures -#feature_function=RuleShape -#feature_function=SourceSpanSizeFeatures -#feature_function=SourceWordPenalty -#feature_function=SpanFeatures diff --git a/dtrain/parallelize/test/dtrain.ini b/dtrain/parallelize/test/dtrain.ini deleted file mode 100644 index 03f9d240..00000000 --- a/dtrain/parallelize/test/dtrain.ini +++ /dev/null @@ -1,15 +0,0 @@ -k=100 -N=4 -learning_rate=0.0001 -gamma=0 -loss_margin=0 -epochs=1 -scorer=stupid_bleu -sample_from=kbest -filter=uniq -pair_sampling=XYX -hi_lo=0.1 -select_weights=last -print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough -tmp=/tmp -decoder_config=cdec.ini diff --git a/dtrain/parallelize/test/in b/dtrain/parallelize/test/in deleted file mode 100644 index a312809f..00000000 --- a/dtrain/parallelize/test/in +++ /dev/null @@ -1,10 +0,0 @@ -barack obama erhält als vierter us @-@ präsident den frieden nobelpreis -der amerikanische präsident barack obama kommt für 26 stunden nach oslo , norwegen , um hier als vierter us @-@ präsident in der geschichte den frieden nobelpreis entgegen zunehmen . -darüber hinaus erhält er das diplom sowie die medaille und einen scheck über 1,4 mio. dollar für seine außer gewöhnlichen bestrebungen um die intensivierung der welt diplomatie und zusammen arbeit unter den völkern . -der chef des weißen hauses kommt morgen zusammen mit seiner frau michelle in der nordwegischen metropole an und wird die ganze zeit beschäftigt sein . -zunächst stattet er dem nobel @-@ institut einen besuch ab , wo er überhaupt zum ersten mal mit den fünf ausschuss mitglieder zusammen trifft , die ihn im oktober aus 172 leuten und 33 organisationen gewählt haben . 
-das präsidenten paar hat danach ein treffen mit dem norwegischen könig harald v. und königin sonja eingeplant . -nachmittags erreicht dann der besuch seinen höhepunkt mit der zeremonie , bei der obama den prestige preis übernimmt . -diesen erhält er als der vierte us @-@ präsident , aber erst als der dritte , der den preis direkt im amt entgegen nimmt . -das weiße haus avisierte schon , dass obama bei der übernahme des preises über den afghanistan krieg sprechen wird . -der präsident will diesem thema nicht ausweichen , weil er weiß , dass er den preis als ein präsident übernimmt , der zur zeit krieg in zwei ländern führt . diff --git a/dtrain/parallelize/test/refs b/dtrain/parallelize/test/refs deleted file mode 100644 index 4d3128cb..00000000 --- a/dtrain/parallelize/test/refs +++ /dev/null @@ -1,10 +0,0 @@ -barack obama becomes the fourth american president to receive the nobel peace prize -the american president barack obama will fly into oslo , norway for 26 hours to receive the nobel peace prize , the fourth american president in history to do so . -he will receive a diploma , medal and cheque for 1.4 million dollars for his exceptional efforts to improve global diplomacy and encourage international cooperation , amongst other things . -the head of the white house will be flying into the norwegian city in the morning with his wife michelle and will have a busy schedule . -first , he will visit the nobel institute , where he will have his first meeting with the five committee members who selected him from 172 people and 33 organisations . -the presidential couple then has a meeting scheduled with king harald v and queen sonja of norway . -then , in the afternoon , the visit will culminate in a grand ceremony , at which obama will receive the prestigious award . -he will be the fourth american president to be awarded the prize , and only the third to have received it while actually in office . -the white house has stated that , when he accepts the prize , obama will speak about the war in afghanistan . -the president does not want to skirt around this topic , as he realises that he is accepting the prize as a president whose country is currently at war in two countries . diff --git a/dtrain/score.cc b/dtrain/score.cc deleted file mode 100644 index 34fc86a9..00000000 --- a/dtrain/score.cc +++ /dev/null @@ -1,254 +0,0 @@ -#include "score.h" - -namespace dtrain -{ - - -/* - * bleu - * - * as in "BLEU: a Method for Automatic Evaluation - * of Machine Translation" - * (Papineni et al. 
'02) - * - * NOTE: 0 if for one n \in {1..N} count is 0 - */ -score_t -BleuScorer::Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len) -{ - if (hyp_len == 0 || ref_len == 0) return 0.; - unsigned M = N_; - vector v = w_; - if (ref_len < N_) { - M = ref_len; - for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M); - } - score_t sum = 0; - for (unsigned i = 0; i < M; i++) { - if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) return 0.; - sum += v[i] * log((score_t)counts.clipped_[i]/counts.sum_[i]); - } - return brevity_penalty(hyp_len, ref_len) * exp(sum); -} - -score_t -BleuScorer::Score(vector& hyp, vector& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - return Bleu(counts, hyp_len, ref_len); -} - -/* - * 'stupid' bleu - * - * as in "ORANGE: a Method for Evaluating - * Automatic Evaluation Metrics - * for Machine Translation" - * (Lin & Och '04) - * - * NOTE: 0 iff no 1gram match - */ -score_t -StupidBleuScorer::Score(vector& hyp, vector& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - unsigned M = N_; - vector v = w_; - if (ref_len < N_) { - M = ref_len; - for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M); - } - score_t sum = 0, add = 0; - for (unsigned i = 0; i < M; i++) { - if (i == 0 && (counts.sum_[i] == 0 || counts.clipped_[i] == 0)) return 0.; - if (i == 1) add = 1; - sum += v[i] * log(((score_t)counts.clipped_[i] + add)/((counts.sum_[i] + add))); - } - return brevity_penalty(hyp_len, ref_len) * exp(sum); -} - -/* - * smooth bleu - * - * as in "An End-to-End Discriminative Approach - * to Machine Translation" - * (Liang et al. 
'06) - * - * NOTE: max is 0.9375 (with N=4) - */ -score_t -SmoothBleuScorer::Score(vector& hyp, vector& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - unsigned M = N_; - if (ref_len < N_) M = ref_len; - score_t sum = 0.; - vector i_bleu; - for (unsigned i = 0; i < M; i++) i_bleu.push_back(0.); - for (unsigned i = 0; i < M; i++) { - if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) { - break; - } else { - score_t i_ng = log((score_t)counts.clipped_[i]/counts.sum_[i]); - for (unsigned j = i; j < M; j++) { - i_bleu[j] += (1/((score_t)j+1)) * i_ng; - } - } - sum += exp(i_bleu[i])/pow(2.0, (double)(N_-i)); - } - return brevity_penalty(hyp_len, ref_len) * sum; -} - -/* - * 'sum' bleu - * - * sum up Ngram precisions - */ -score_t -SumBleuScorer::Score(vector& hyp, vector& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - unsigned M = N_; - if (ref_len < N_) M = ref_len; - score_t sum = 0.; - unsigned j = 1; - for (unsigned i = 0; i < M; i++) { - if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break; - sum += ((score_t)counts.clipped_[i]/counts.sum_[i])/pow(2.0, (double) (N_-j+1)); - j++; - } - return brevity_penalty(hyp_len, ref_len) * sum; -} - -/* - * 'sum' (exp) bleu - * - * sum up exp(Ngram precisions) - */ -score_t -SumExpBleuScorer::Score(vector& hyp, vector& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - unsigned M = N_; - if (ref_len < N_) M = ref_len; - score_t sum = 0.; - unsigned j = 1; - for (unsigned i = 0; i < M; i++) { - if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break; - sum += exp(((score_t)counts.clipped_[i]/counts.sum_[i]))/pow(2.0, (double) (N_-j+1)); - j++; - } - return brevity_penalty(hyp_len, ref_len) * sum; -} - -/* - * 'sum' (whatever) bleu - * - * sum up exp(weight * log(Ngram precisions)) - */ -score_t -SumWhateverBleuScorer::Score(vector& hyp, vector& ref, - const unsigned /*rank*/, const unsigned /*src_len*/) -{ - unsigned hyp_len = hyp.size(), ref_len = ref.size(); - if (hyp_len == 0 || ref_len == 0) return 0.; - NgramCounts counts = make_ngram_counts(hyp, ref, N_); - unsigned M = N_; - vector v = w_; - if (ref_len < N_) { - M = ref_len; - for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M); - } - score_t sum = 0.; - unsigned j = 1; - for (unsigned i = 0; i < M; i++) { - if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break; - sum += exp(v[i] * log(((score_t)counts.clipped_[i]/counts.sum_[i])))/pow(2.0, (double) (N_-j+1)); - j++; - } - return brevity_penalty(hyp_len, ref_len) * sum; -} - -/* - * approx. bleu - * - * as in "Online Large-Margin Training of Syntactic - * and Structural Translation Features" - * (Chiang et al. '08) - * - * NOTE: Needs some more code in dtrain.cc . - * No scaling by src len. 
-
-/*
- * approx. bleu
- *
- * as in "Online Large-Margin Training of Syntactic
- *        and Structural Translation Features"
- * (Chiang et al. '08)
- *
- * NOTE: needs some more code in dtrain.cc;
- *       no scaling by src len.
- */
-score_t
-ApproxBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
-                        const unsigned rank, const unsigned src_len)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (ref_len == 0) return 0.;
-  score_t score = 0.;
-  NgramCounts counts(N_);
-  if (hyp_len > 0) {
-    counts = make_ngram_counts(hyp, ref, N_);
-    NgramCounts tmp = glob_onebest_counts_ + counts;
-    score = Bleu(tmp, hyp_len, ref_len);
-  }
-  if (rank == 0) { // 'context of 1best translations'
-    glob_onebest_counts_ += counts;
-    glob_onebest_counts_ *= discount_;
-    glob_hyp_len_ = discount_ * (glob_hyp_len_ + hyp_len);
-    glob_ref_len_ = discount_ * (glob_ref_len_ + ref_len);
-    glob_src_len_ = discount_ * (glob_src_len_ + src_len);
-  }
-  return score;
-}
-
-/*
- * Linear (Corpus) Bleu
- *
- * as in "Lattice Minimum Bayes-Risk Decoding
- *        for Statistical Machine Translation"
- * (Tromble et al. '08)
- */
-score_t
-LinearBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
-                        const unsigned rank, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (ref_len == 0) return 0.;
-  unsigned M = N_;
-  if (ref_len < N_) M = ref_len;
-  NgramCounts counts(M);
-  if (hyp_len > 0)
-    counts = make_ngram_counts(hyp, ref, M);
-  score_t ret = 0.;
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || onebest_counts_.sum_[i] == 0) break;
-    ret += counts.sum_[i]/onebest_counts_.sum_[i];
-  }
-  ret = -(hyp_len/(score_t)onebest_len_) + (1./M) * ret;
-  if (rank == 0) {
-    onebest_len_ += hyp_len;
-    onebest_counts_ += counts;
-  }
-  return ret;
-}
-
-
-} // namespace
-
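The 'context of 1best translations' update above is an exponentially decayed history: the accumulated counts are multiplied by the discount after every sentence, so a 1-best from t sentences back contributes with weight discount^(t+1). A standalone sketch of that recurrence, with a scalar standing in for the n-gram counts (toy numbers, not dtrain code):

```
// Exponential decay of a running statistic, as in the approx. BLEU update:
// history = discount * (history + newest); older items fade geometrically.
#include <iostream>

int main() {
  const double discount = 0.9;
  double history = 0.;
  double counts[] = {10., 10., 10.};  // e.g. per-sentence 1-best n-gram totals
  for (double c : counts)
    history = discount * (history + c);
  // weight of the item t steps back is discount^(t+1):
  // 0.9^3*10 + 0.9^2*10 + 0.9*10 = 24.39
  std::cout << history << std::endl;
  return 0;
}
```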
diff --git a/dtrain/score.h b/dtrain/score.h
deleted file mode 100644
index f317c903..00000000
--- a/dtrain/score.h
+++ /dev/null
@@ -1,212 +0,0 @@
-#ifndef _DTRAIN_SCORE_H_
-#define _DTRAIN_SCORE_H_
-
-#include "kbestget.h"
-
-using namespace std;
-
-namespace dtrain
-{
-
-
-struct NgramCounts
-{
-  unsigned N_;
-  map<unsigned, score_t> clipped_;
-  map<unsigned, score_t> sum_;
-
-  NgramCounts(const unsigned N) : N_(N) { Zero(); }
-
-  inline void
-  operator+=(const NgramCounts& rhs)
-  {
-    if (rhs.N_ > N_) Resize(rhs.N_);
-    for (unsigned i = 0; i < N_; i++) {
-      this->clipped_[i] += rhs.clipped_.find(i)->second;
-      this->sum_[i] += rhs.sum_.find(i)->second;
-    }
-  }
-
-  inline const NgramCounts
-  operator+(const NgramCounts &other) const
-  {
-    NgramCounts result = *this;
-    result += other;
-    return result;
-  }
-
-  inline void
-  operator*=(const score_t rhs)
-  {
-    for (unsigned i = 0; i < N_; i++) {
-      this->clipped_[i] *= rhs;
-      this->sum_[i] *= rhs;
-    }
-  }
-
-  inline void
-  Add(const unsigned count, const unsigned ref_count, const unsigned i)
-  {
-    assert(i < N_);
-    if (count > ref_count) {
-      clipped_[i] += ref_count;
-    } else {
-      clipped_[i] += count;
-    }
-    sum_[i] += count;
-  }
-
-  inline void
-  Zero()
-  {
-    for (unsigned i = 0; i < N_; i++) {
-      clipped_[i] = 0.;
-      sum_[i] = 0.;
-    }
-  }
-
-  inline void
-  One()
-  {
-    for (unsigned i = 0; i < N_; i++) {
-      clipped_[i] = 1.;
-      sum_[i] = 1.;
-    }
-  }
-
-  inline void
-  Print()
-  {
-    for (unsigned i = 0; i < N_; i++) {
-      cout << i+1 << "grams (clipped):\t" << clipped_[i] << endl;
-      cout << i+1 << "grams:\t\t\t" << sum_[i] << endl;
-    }
-  }
-
-  inline void Resize(unsigned N)
-  {
-    if (N == N_) return;
-    else if (N > N_) {
-      for (unsigned i = N_; i < N; i++) {
-        clipped_[i] = 0.;
-        sum_[i] = 0.;
-      }
-    } else { // N < N_
-      for (unsigned i = N_-1; i > N-1; i--) {
-        clipped_.erase(i);
-        sum_.erase(i);
-      }
-    }
-    N_ = N;
-  }
-};
-
-typedef map<vector<WordID>, unsigned> Ngrams;
-
-inline Ngrams
-make_ngrams(const vector<WordID>& s, const unsigned N)
-{
-  Ngrams ngrams;
-  vector<WordID> ng;
-  for (size_t i = 0; i < s.size(); i++) {
-    ng.clear();
-    for (unsigned j = i; j < min(i+N, s.size()); j++) {
-      ng.push_back(s[j]);
-      ngrams[ng]++;
-    }
-  }
-  return ngrams;
-}
-
-inline NgramCounts
-make_ngram_counts(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned N)
-{
-  Ngrams hyp_ngrams = make_ngrams(hyp, N);
-  Ngrams ref_ngrams = make_ngrams(ref, N);
-  NgramCounts counts(N);
-  Ngrams::iterator it;
-  Ngrams::iterator ti;
-  for (it = hyp_ngrams.begin(); it != hyp_ngrams.end(); it++) {
-    ti = ref_ngrams.find(it->first);
-    if (ti != ref_ngrams.end()) {
-      counts.Add(it->second, ti->second, it->first.size() - 1);
-    } else {
-      counts.Add(it->second, 0, it->first.size() - 1);
-    }
-  }
-  return counts;
-}
-
-struct BleuScorer : public LocalScorer
-{
-  score_t Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len);
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct StupidBleuScorer : public LocalScorer
-{
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct SmoothBleuScorer : public LocalScorer
-{
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct SumBleuScorer : public LocalScorer
-{
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct SumExpBleuScorer : public LocalScorer
-{
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct SumWhateverBleuScorer : public LocalScorer
-{
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct ApproxBleuScorer : public BleuScorer
-{
-  NgramCounts glob_onebest_counts_;
-  score_t glob_hyp_len_, glob_ref_len_, glob_src_len_; // score_t: these are decayed (fractional) lengths
-  score_t discount_;
-
-  ApproxBleuScorer(unsigned N, score_t d) : glob_onebest_counts_(NgramCounts(N)), discount_(d)
-  {
-    glob_hyp_len_ = glob_ref_len_ = glob_src_len_ = 0;
-  }
-
-  inline void Reset() {
-    glob_onebest_counts_.Zero();
-    glob_hyp_len_ = glob_ref_len_ = glob_src_len_ = 0.;
-  }
-
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned src_len);
-};
-
-struct LinearBleuScorer : public BleuScorer
-{
-  unsigned onebest_len_;
-  NgramCounts onebest_counts_;
-
-  LinearBleuScorer(unsigned N) : onebest_len_(1), onebest_counts_(N)
-  {
-    onebest_counts_.One();
-  }
-
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned /*src_len*/);
-
-  inline void Reset() {
-    onebest_len_ = 1;
-    onebest_counts_.One();
-  }
-};
-
-
-} // namespace
-
-#endif
-
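To see what the header's make_ngram_counts produces, a hedged usage sketch follows. It assumes the dtrain headers and cdec's term dictionary (tdict.h with TD::ConvertSentence) are on the include path and linkable, so treat it as a sketch rather than a tested program:

```
// Sketch: clipped vs. raw n-gram counts for a toy hypothesis/reference pair.
// Assumes dtrain/score.h and cdec's TD machinery are available.
#include <vector>
#include "tdict.h"
#include "score.h"

using namespace dtrain;

int main() {
  std::vector<WordID> hyp, ref;
  TD::ConvertSentence("i saw a little house", &hyp);
  TD::ConvertSentence("i saw a small house", &ref);
  NgramCounts counts = make_ngram_counts(hyp, ref, 4);
  counts.Print();  // e.g. 1-grams: 5 total, 4 clipped ("little" is unmatched)
  return 0;
}
```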
diff --git a/dtrain/test/example/README b/dtrain/test/example/README
deleted file mode 100644
index 6937b11b..00000000
--- a/dtrain/test/example/README
+++ /dev/null
@@ -1,8 +0,0 @@
-Small example of the input format for distributed training.
-Call dtrain from cdec/dtrain/ with ./dtrain -c test/example/dtrain.ini .
-
-For this to work, undef 'DTRAIN_LOCAL' in dtrain.h
-and recompile.
-
-Data is here: http://simianer.de/#dtrain
-
diff --git a/dtrain/test/example/cdec.ini b/dtrain/test/example/cdec.ini
deleted file mode 100644
index d5955f0e..00000000
--- a/dtrain/test/example/cdec.ini
+++ /dev/null
@@ -1,25 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
-scfg_max_span_limit=15
-intersection_strategy=cube_pruning
-cubepruning_pop_limit=30
-feature_function=WordPenalty
-feature_function=KLanguageModel test/example/nc-wmt11.en.srilm.gz
-# all currently working feature functions for translation:
-# (with those features active that were used in the ACL paper)
-#feature_function=ArityPenalty
-#feature_function=CMR2008ReorderingFeatures
-#feature_function=Dwarf
-#feature_function=InputIndicator
-#feature_function=LexNullJump
-#feature_function=NewJump
-#feature_function=NgramFeatures
-#feature_function=NonLatinCount
-#feature_function=OutputIndicator
-feature_function=RuleIdentityFeatures
-feature_function=RuleSourceBigramFeatures
-feature_function=RuleTargetBigramFeatures
-feature_function=RuleShape
-#feature_function=SourceSpanSizeFeatures
-#feature_function=SourceWordPenalty
-#feature_function=SpanFeatures
diff --git a/dtrain/test/example/dtrain.ini b/dtrain/test/example/dtrain.ini
deleted file mode 100644
index 72d50ca1..00000000
--- a/dtrain/test/example/dtrain.ini
+++ /dev/null
@@ -1,22 +0,0 @@
-input=test/example/nc-wmt11.1k.gz # use '-' for STDIN
-output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
-select_weights=VOID # don't output weights
-decoder_config=test/example/cdec.ini # config for cdec
-# weights for these features will be printed on each iteration
-print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-tmp=/tmp
-stop_after=10 # stop each epoch after 10 inputs
-
-# interesting stuff
-epochs=2 # run over the input 2 times
-k=100 # use 100-best lists
-N=4 # optimize (approx.) BLEU4
-scorer=stupid_bleu # use 'stupid' BLEU+1
-learning_rate=1.0 # learning rate; its scale is irrelevant if gamma=0 (plain perceptron)
-gamma=0 # SVM-style margin regularization; 0 disables it
-sample_from=kbest # use k-best lists (as opposed to the forest)
-filter=uniq # only unique entries in the k-best list (by surface form)
-pair_sampling=XYX
-hi_lo=0.1 # XYX: pair the top 10% vs. the middle 80%, and the middle 80% vs. the bottom 10%
-pair_threshold=0 # minimum BLEU distance for a pair (still only uses pairs with diff > 0)
-loss_margin=0
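The knobs in this dtrain.ini describe a pairwise ranking update: for a sampled pair whose model scores disagree with its BLEU order, or whose margin falls below loss_margin, the weights move along the feature difference, scaled by learning_rate. A minimal sketch of that rule under these assumptions (hypothetical rank_update helper with dense toy features; the actual trainer uses cdec's sparse vectors and, with gamma>0, an extra regularization term):

```
// Pairwise ranking perceptron step, as configured above (gamma=0, loss_margin=0):
// if the higher-BLEU hypothesis does not out-score the lower one by at least
// the margin, add learning_rate * (f_good - f_bad) to the weights.
#include <vector>

void rank_update(std::vector<double>& w,
                 const std::vector<double>& f_good,
                 const std::vector<double>& f_bad,
                 double learning_rate, double loss_margin) {
  double model_diff = 0.;
  for (size_t i = 0; i < w.size(); i++)
    model_diff += w[i] * (f_good[i] - f_bad[i]);
  if (model_diff <= loss_margin) {  // margin violation (rank error if <= 0)
    for (size_t i = 0; i < w.size(); i++)
      w[i] += learning_rate * (f_good[i] - f_bad[i]);
  }
}

int main() {
  std::vector<double> w(3, 0.);
  std::vector<double> f_good = {1., 0., 1.};  // features of the higher-BLEU hyp
  std::vector<double> f_bad  = {0., 1., 1.};
  rank_update(w, f_good, f_bad, 1.0, 0.0);
  // w is now {1, -1, 0}: the shared feature cancels out
  return 0;
}
```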
diff --git a/dtrain/test/example/expected-output b/dtrain/test/example/expected-output
deleted file mode 100644
index 05326763..00000000
--- a/dtrain/test/example/expected-output
+++ /dev/null
@@ -1,89 +0,0 @@
- cdec cfg 'test/example/cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading test/example/nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
- Example feature: Shape_S00000_T00000
-Seeding random number sequence to 2912000813
-
-dtrain
-Parameters:
-                 k 100
-                 N 4
-                 T 2
-            scorer 'stupid_bleu'
-       sample from 'kbest'
-            filter 'uniq'
-     learning rate 1
-             gamma 0
-       loss margin 0
-             pairs 'XYX'
-             hi lo 0.1
-    pair threshold 0
-    select weights 'VOID'
-            l1 reg 0 'none'
-         max pairs 4294967295
-          cdec cfg 'test/example/cdec.ini'
-             input 'test/example/nc-wmt11.1k.gz'
-            output '-'
-        stop_after 10
-(a dot represents 10 inputs)
-Iteration #1 of 2.
- . 10
-Stopping after 10 input sentences.
-WEIGHTS
-              Glue = -637
-       WordPenalty = +1064
-     LanguageModel = +1175.3
- LanguageModel_OOV = -1437
-     PhraseModel_0 = +1935.6
-     PhraseModel_1 = +2499.3
-     PhraseModel_2 = +964.96
-     PhraseModel_3 = +1410.8
-     PhraseModel_4 = -5977.9
-     PhraseModel_5 = +522
-     PhraseModel_6 = +1089
-       PassThrough = -1308
-        ---
-       1best avg score: 0.16963 (+0.16963)
- 1best avg model score: 64485 (+64485)
-           avg # pairs: 1494.4
-        avg # rank err: 702.6
-     avg # margin viol: 0
-    non0 feature count: 528
-           avg list sz: 85.7
-           avg f count: 102.75
-(time 0.083 min, 0.5 s/S)
-
-Iteration #2 of 2.
- . 10
-WEIGHTS
-              Glue = -1196
-       WordPenalty = +809.52
-     LanguageModel = +3112.1
- LanguageModel_OOV = -1464
-     PhraseModel_0 = +3895.5
-     PhraseModel_1 = +4683.4
-     PhraseModel_2 = +1092.8
-     PhraseModel_3 = +1079.6
-     PhraseModel_4 = -6827.7
-     PhraseModel_5 = -888
-     PhraseModel_6 = +142
-       PassThrough = -1335
-        ---
-       1best avg score: 0.277 (+0.10736)
- 1best avg model score: -3110.5 (-67595)
-           avg # pairs: 1144.2
-        avg # rank err: 529.1
-     avg # margin viol: 0
-    non0 feature count: 859
-           avg list sz: 74.9
-           avg f count: 112.84
-(time 0.067 min, 0.4 s/S)
-
-Writing weights file to '-' ...
-done
-
----
-Best iteration: 2 [SCORE 'stupid_bleu'=0.277].
-This took 0.15 min.
diff --git a/dtrain/test/parallelize/cdec.ini b/dtrain/test/parallelize/cdec.ini
deleted file mode 100644
index 72e99dc5..00000000
--- a/dtrain/test/parallelize/cdec.ini
+++ /dev/null
@@ -1,22 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
-intersection_strategy=cube_pruning
-cubepruning_pop_limit=200
-scfg_max_span_limit=15
-feature_function=WordPenalty
-feature_function=KLanguageModel /stor/dat/wmt12/en/news_only/m/wmt12.news.en.3.kenv5
-#feature_function=ArityPenalty
-#feature_function=CMR2008ReorderingFeatures
-#feature_function=Dwarf
-#feature_function=InputIndicator
-#feature_function=LexNullJump
-#feature_function=NewJump
-#feature_function=NgramFeatures
-#feature_function=NonLatinCount
-#feature_function=OutputIndicator
-#feature_function=RuleIdentityFeatures
-#feature_function=RuleNgramFeatures
-#feature_function=RuleShape
-#feature_function=SourceSpanSizeFeatures
-#feature_function=SourceWordPenalty
-#feature_function=SpanFeatures
diff --git a/dtrain/test/parallelize/dtrain.ini b/dtrain/test/parallelize/dtrain.ini
deleted file mode 100644
index 03f9d240..00000000
--- a/dtrain/test/parallelize/dtrain.ini
+++ /dev/null
@@ -1,15 +0,0 @@
-k=100
-N=4
-learning_rate=0.0001
-gamma=0
-loss_margin=0
-epochs=1
-scorer=stupid_bleu
-sample_from=kbest
-filter=uniq
-pair_sampling=XYX
-hi_lo=0.1
-select_weights=last
-print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-tmp=/tmp
-decoder_config=cdec.ini
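Both dtrain.ini files pick pair_sampling=XYX with hi_lo=0.1, i.e. pairs are built across the boundaries of the top and bottom 10% of each BLEU-sorted k-best list. A standalone sketch of that partitioning over plain indices (hypothetical xyx_pairs helper; the real sampler works on scored hypotheses and may apply further filtering such as pair_threshold):

```
// XYX pair sampling sketch: with hi_lo = 0.1 and a BLEU-sorted k-best list,
// pair the top 10% against the middle 80%, and the middle 80% against the
// bottom 10%. Toy version over indices only.
#include <cstdio>
#include <utility>
#include <vector>

std::vector<std::pair<unsigned, unsigned> >
xyx_pairs(unsigned k, float hi_lo) {
  std::vector<std::pair<unsigned, unsigned> > pairs;
  unsigned sep = (unsigned)(hi_lo * k);  // size of the top/bottom slices
  for (unsigned i = 0; i < sep; i++)           // top 10% vs. middle 80%
    for (unsigned j = sep; j < k - sep; j++)
      pairs.push_back(std::make_pair(i, j));
  for (unsigned i = sep; i < k - sep; i++)     // middle 80% vs. bottom 10%
    for (unsigned j = k - sep; j < k; j++)
      pairs.push_back(std::make_pair(i, j));
  return pairs;
}

int main() {
  std::vector<std::pair<unsigned, unsigned> > p = xyx_pairs(10, 0.1f);
  std::printf("%zu pairs\n", p.size());  // 8 + 8 = 16 for k=10
  return 0;
}
```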
diff --git a/dtrain/test/parallelize/in b/dtrain/test/parallelize/in
deleted file mode 100644
index a312809f..00000000
--- a/dtrain/test/parallelize/in
+++ /dev/null
@@ -1,10 +0,0 @@
-barack obama erhält als vierter us @-@ präsident den frieden nobelpreis
-der amerikanische präsident barack obama kommt für 26 stunden nach oslo , norwegen , um hier als vierter us @-@ präsident in der geschichte den frieden nobelpreis entgegen zunehmen .
-darüber hinaus erhält er das diplom sowie die medaille und einen scheck über 1,4 mio. dollar für seine außer gewöhnlichen bestrebungen um die intensivierung der welt diplomatie und zusammen arbeit unter den völkern .
-der chef des weißen hauses kommt morgen zusammen mit seiner frau michelle in der nordwegischen metropole an und wird die ganze zeit beschäftigt sein .
-zunächst stattet er dem nobel @-@ institut einen besuch ab , wo er überhaupt zum ersten mal mit den fünf ausschuss mitglieder zusammen trifft , die ihn im oktober aus 172 leuten und 33 organisationen gewählt haben .
-das präsidenten paar hat danach ein treffen mit dem norwegischen könig harald v. und königin sonja eingeplant .
-nachmittags erreicht dann der besuch seinen höhepunkt mit der zeremonie , bei der obama den prestige preis übernimmt .
-diesen erhält er als der vierte us @-@ präsident , aber erst als der dritte , der den preis direkt im amt entgegen nimmt .
-das weiße haus avisierte schon , dass obama bei der übernahme des preises über den afghanistan krieg sprechen wird .
-der präsident will diesem thema nicht ausweichen , weil er weiß , dass er den preis als ein präsident übernimmt , der zur zeit krieg in zwei ländern führt .
diff --git a/dtrain/test/parallelize/refs b/dtrain/test/parallelize/refs
deleted file mode 100644
index 4d3128cb..00000000
--- a/dtrain/test/parallelize/refs
+++ /dev/null
@@ -1,10 +0,0 @@
-barack obama becomes the fourth american president to receive the nobel peace prize
-the american president barack obama will fly into oslo , norway for 26 hours to receive the nobel peace prize , the fourth american president in history to do so .
-he will receive a diploma , medal and cheque for 1.4 million dollars for his exceptional efforts to improve global diplomacy and encourage international cooperation , amongst other things .
-the head of the white house will be flying into the norwegian city in the morning with his wife michelle and will have a busy schedule .
-first , he will visit the nobel institute , where he will have his first meeting with the five committee members who selected him from 172 people and 33 organisations .
-the presidential couple then has a meeting scheduled with king harald v and queen sonja of norway .
-then , in the afternoon , the visit will culminate in a grand ceremony , at which obama will receive the prestigious award .
-he will be the fourth american president to be awarded the prize , and only the third to have received it while actually in office .
-the white house has stated that , when he accepts the prize , obama will speak about the war in afghanistan .
-the president does not want to skirt around this topic , as he realises that he is accepting the prize as a president whose country is currently at war in two countries .
diff --git a/dtrain/test/toy/cdec.ini b/dtrain/test/toy/cdec.ini
deleted file mode 100644
index 98b02d44..00000000
--- a/dtrain/test/toy/cdec.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
diff --git a/dtrain/test/toy/dtrain.ini b/dtrain/test/toy/dtrain.ini
deleted file mode 100644
index a091732f..00000000
--- a/dtrain/test/toy/dtrain.ini
+++ /dev/null
@@ -1,12 +0,0 @@
-decoder_config=test/toy/cdec.ini
-input=test/toy/input
-output=-
-print_weights=logp shell_rule house_rule small_rule little_rule PassThrough
-k=4
-N=4
-epochs=2
-scorer=bleu
-sample_from=kbest
-filter=uniq
-pair_sampling=all
-learning_rate=1
diff --git a/dtrain/test/toy/input b/dtrain/test/toy/input
deleted file mode 100644
index 4d10a9ea..00000000
--- a/dtrain/test/toy/input
+++ /dev/null
@@ -1,2 +0,0 @@
-0 ich sah ein kleines haus i saw a little house [S] ||| [NP,1] [VP,2] ||| [1] [2] ||| logp=0 [NP] ||| ich ||| i ||| logp=0 [NP] ||| ein [NN,1] ||| a [1] ||| logp=0 [NN] ||| [JJ,1] haus ||| [1] house ||| logp=0 house_rule=1 [NN] ||| [JJ,1] haus ||| [1] shell ||| logp=0 shell_rule=1 [JJ] ||| kleines ||| small ||| logp=0 small_rule=1 [JJ] ||| kleines ||| little ||| logp=0 little_rule=1 [JJ] ||| grosses ||| big ||| logp=0 [JJ] ||| grosses ||| large ||| logp=0 [VP] ||| [V,1] [NP,2] ||| [1] [2] ||| logp=0 [V] ||| sah ||| saw ||| logp=0 [V] ||| fand ||| found ||| logp=0
-1 ich fand ein kleines haus i found a little house [S] ||| [NP,1] [VP,2] ||| [1] [2] ||| logp=0 [NP] ||| ich ||| i ||| logp=0 [NP] ||| ein [NN,1] ||| a [1] ||| logp=0 [NN] ||| [JJ,1] haus ||| [1] house ||| logp=0 house_rule=1 [NN] ||| [JJ,1] haus ||| [1] shell ||| logp=0 shell_rule=1 [JJ] ||| kleines ||| small ||| logp=0 small_rule=1 [JJ] ||| kleines ||| little ||| logp=0 little_rule=1 [JJ] ||| grosses ||| big ||| logp=0 [JJ] ||| grosses ||| large ||| logp=0 [VP] ||| [V,1] [NP,2] ||| [1] [2] ||| logp=0 [V] ||| sah ||| saw ||| logp=0 [V] ||| fand ||| found ||| logp=0
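Each line of the toy input carries an id, the source, the reference, and an inline SCFG whose rule fields are delimited by |||. A hedged sketch for splitting one such rule into its fields (hypothetical split_fields helper, plain string handling, not dtrain code):

```
// Sketch: split one inline grammar rule of the toy input into its ||| fields.
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> split_fields(const std::string& rule) {
  std::vector<std::string> fields;
  size_t start = 0, sep;
  while ((sep = rule.find(" ||| ", start)) != std::string::npos) {
    fields.push_back(rule.substr(start, sep - start));
    start = sep + 5;  // length of " ||| "
  }
  fields.push_back(rule.substr(start));
  return fields;
}

int main() {
  std::string rule = "[NN] ||| [JJ,1] haus ||| [1] house ||| logp=0 house_rule=1";
  std::vector<std::string> f = split_fields(rule);
  // f[0]: LHS nonterminal, f[1]: source RHS, f[2]: target RHS, f[3]: features
  for (size_t i = 0; i < f.size(); i++)
    std::cout << i << ": " << f[i] << std::endl;
  return 0;
}
```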