| author    | Avneesh Saluja <asaluja@gmail.com>       | 2013-03-28 18:28:16 -0700 |
|-----------|------------------------------------------|---------------------------|
| committer | Avneesh Saluja <asaluja@gmail.com>       | 2013-03-28 18:28:16 -0700 |
| commit    | 3d8d656fa7911524e0e6885647173474524e0784 | (patch)                   |
| tree      | 81b1ee2fcb67980376d03f0aa48e42e53abff222 | /dtrain                   |
| parent    | be7f57fdd484e063775d7abf083b9fa4c403b610 | (diff)                    |
| parent    | 96fedabebafe7a38a6d5928be8fff767e411d705 | (diff)                    |
fixed conflicts
Diffstat (limited to 'dtrain')
| mode       | path                                     | deletions |
|------------|------------------------------------------|-----------|
| -rw-r--r-- | dtrain/Makefile.am                       | 7         |
| -rw-r--r-- | dtrain/README.md                         | 48        |
| -rw-r--r-- | dtrain/dtrain.cc                         | 657       |
| -rw-r--r-- | dtrain/dtrain.h                          | 98        |
| -rwxr-xr-x | dtrain/hstreaming/avg.rb                 | 32        |
| -rw-r--r-- | dtrain/hstreaming/cdec.ini               | 22        |
| -rw-r--r-- | dtrain/hstreaming/dtrain.ini             | 15        |
| -rwxr-xr-x | dtrain/hstreaming/dtrain.sh              | 9         |
| -rwxr-xr-x | dtrain/hstreaming/hadoop-streaming-job.sh | 30       |
| -rwxr-xr-x | dtrain/hstreaming/lplp.rb                | 131       |
| -rw-r--r-- | dtrain/hstreaming/red-test               | 9         |
| -rw-r--r-- | dtrain/kbestget.h                        | 152       |
| -rw-r--r-- | dtrain/ksampler.h                        | 61        |
| -rw-r--r-- | dtrain/pairsampling.h                    | 149       |
| -rw-r--r-- | dtrain/score.cc                          | 254       |
| -rw-r--r-- | dtrain/score.h                           | 212       |
| -rw-r--r-- | dtrain/test/example/README               | 8         |
| -rw-r--r-- | dtrain/test/example/cdec.ini             | 24        |
| -rw-r--r-- | dtrain/test/example/dtrain.ini           | 22        |
| -rw-r--r-- | dtrain/test/example/expected-output      | 125       |
| -rw-r--r-- | dtrain/test/toy/cdec.ini                 | 2         |
| -rw-r--r-- | dtrain/test/toy/dtrain.ini               | 12        |
| -rw-r--r-- | dtrain/test/toy/input                    | 2         |
23 files changed, 0 insertions, 2081 deletions
diff --git a/dtrain/Makefile.am b/dtrain/Makefile.am
deleted file mode 100644
index 64fef489..00000000
--- a/dtrain/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-bin_PROGRAMS = dtrain
-
-dtrain_SOURCES = dtrain.cc score.cc
-dtrain_LDADD   = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
-
-AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval
-
diff --git a/dtrain/README.md b/dtrain/README.md
deleted file mode 100644
index 7edabbf1..00000000
--- a/dtrain/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-This is a simple (and parallelizable) tuning method for cdec
-which is able to train the weights of very many (sparse) features.
-It was used here:
-  "Joint Feature Selection in Distributed Stochastic
-   Learning for Large-Scale Discriminative Training in
-   SMT"
-(Simianer, Riezler, Dyer; ACL 2012)
-
-
-Building
---------
-Builds when building cdec, see ../BUILDING .
-To build only parts needed for dtrain do
-```
-  autoreconf -ifv
-  ./configure [--disable-gtest]
-  cd dtrain/; make
-```
-
-Running
--------
-To run this on a dev set locally:
-```
-    #define DTRAIN_LOCAL
-```
-otherwise remove that line or undef, then recompile. You need a single
-grammar file or input annotated with per-sentence grammars (psg) as you
-would use with cdec. Additionally you need to give dtrain a file with
-references (--refs) when running locally.
-
-The input for use with hadoop streaming looks like this:
-```
-    <sid>\t<source>\t<ref>\t<grammar rules separated by \t>
-```
-To convert a psg to this format you need to replace all "\n"
-by "\t". Make sure there are no tabs in your data.
-
-For an example of local usage (with the 'distributed' format)
-see test/example/ . This expects dtrain to be built without
-DTRAIN_LOCAL.
-
-Legal
------
-Copyright (c) 2012 by Patrick Simianer <p@simianer.de>
-
-See the file ../LICENSE.txt for the licensing terms that this software is
-released under.
-
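The tab-separated format described in the README above is easy to produce programmatically. A minimal standalone C++11 sketch (not part of dtrain; the sentence, reference, and grammar rules below are made up for illustration) that assembles one such input line from its four parts:

```
// Build one dtrain/hadoop-streaming input line: <sid>\t<source>\t<ref>\t<rules...>
// All data here is illustrative only; rule syntax follows cdec's SCFG style.
#include <iostream>
#include <string>
#include <vector>

int main()
{
  std::string sid = "0";
  std::string src = "ich sah ein kleines haus";
  std::string ref = "i saw a small house";
  std::vector<std::string> rules = {
    "[S] ||| [X,1] ||| [1] ||| Glue=1",
    "[X] ||| haus ||| house ||| LogP=-0.5"
  };
  std::string line = sid + "\t" + src + "\t" + ref;
  for (const std::string& r : rules) line += "\t" + r; // rules separated by \t
  std::cout << line << "\n";
}
```

Concatenating a per-sentence grammar this way is exactly the "\n" to "\t" replacement the README describes, which is also why the data itself must contain no tabs.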
diff --git a/dtrain/dtrain.cc b/dtrain/dtrain.cc
deleted file mode 100644
index b7a4bb6f..00000000
--- a/dtrain/dtrain.cc
+++ /dev/null
@@ -1,657 +0,0 @@
-#include "dtrain.h"
-
-
-bool
-dtrain_init(int argc, char** argv, po::variables_map* cfg)
-{
-  po::options_description ini("Configuration File Options");
-  ini.add_options()
-    ("input",           po::value<string>()->default_value("-"),    "input file")
-    ("output",          po::value<string>()->default_value("-"),    "output weights file, '-' for STDOUT")
-    ("input_weights",   po::value<string>(),                        "input weights file (e.g. from previous iteration)")
-    ("decoder_config",  po::value<string>(),                        "configuration file for cdec")
-    ("print_weights",   po::value<string>(),                        "weights to print on each iteration")
-    ("stop_after",      po::value<unsigned>()->default_value(0),    "stop after X input sentences")
-    ("tmp",             po::value<string>()->default_value("/tmp"), "temp dir to use")
-    ("keep",            po::value<bool>()->zero_tokens(),           "keep weights files for each iteration")
-    ("hstreaming",      po::value<string>(),                        "run in hadoop streaming mode, arg is a task id")
-    ("epochs",          po::value<unsigned>()->default_value(10),   "# of iterations T (per shard)")
-    ("k",               po::value<unsigned>()->default_value(100),  "how many translations to sample")
-    ("sample_from",     po::value<string>()->default_value("kbest"), "where to sample translations from: 'kbest', 'forest'")
-    ("filter",          po::value<string>()->default_value("uniq"), "filter kbest list: 'not', 'uniq'")
-    ("pair_sampling",   po::value<string>()->default_value("XYX"),  "how to sample pairs: 'all', 'XYX' or 'PRO'")
-    ("hi_lo",           po::value<float>()->default_value(0.1),     "hi and lo (X) for XYX (default 0.1), <= 0.5")
-    ("pair_threshold",  po::value<score_t>()->default_value(0.),    "bleu [0,1] threshold to filter pairs")
-    ("N",               po::value<unsigned>()->default_value(4),    "N for Ngrams (BLEU)")
-    ("scorer",          po::value<string>()->default_value("stupid_bleu"), "scoring: bleu, stupid_, smooth_, approx_, lc_")
-    ("learning_rate",   po::value<weight_t>()->default_value(0.0001), "learning rate")
-    ("gamma",           po::value<weight_t>()->default_value(0.),   "gamma for SVM (0 for perceptron)")
-    ("select_weights",  po::value<string>()->default_value("last"), "output best, last, avg weights ('VOID' to throw away)")
-    ("rescale",         po::value<bool>()->zero_tokens(),           "rescale weight vector after each input")
-    ("l1_reg",          po::value<string>()->default_value("none"), "apply l1 regularization as in 'Tsuroka et al' (2010)")
-    ("l1_reg_strength", po::value<weight_t>(),                      "l1 regularization strength")
-    ("fselect",         po::value<weight_t>()->default_value(-1),   "select top x percent (or by threshold) of features after each epoch NOT IMPL") // TODO
-    ("approx_bleu_d",   po::value<score_t>()->default_value(0.9),   "discount for approx. BLEU")
-    ("scale_bleu_diff", po::value<bool>()->zero_tokens(),           "learning rate <- bleu diff of a misranked pair")
-    ("loss_margin",     po::value<weight_t>()->default_value(0.),   "update if no error in pref pair but model scores this near")
-    ("max_pairs",       po::value<unsigned>()->default_value(std::numeric_limits<unsigned>::max()), "max. # of pairs per Sent.")
-#ifdef DTRAIN_LOCAL
-    ("refs,r",          po::value<string>(),                        "references in local mode")
-#endif
-    ("noup",            po::value<bool>()->zero_tokens(),           "do not update weights");
-  po::options_description cl("Command Line Options");
-  cl.add_options()
-    ("config,c",         po::value<string>(),              "dtrain config file")
-    ("quiet,q",          po::value<bool>()->zero_tokens(),           "be quiet")
-    ("verbose,v",        po::value<bool>()->zero_tokens(),         "be verbose");
-  cl.add(ini);
-  po::store(parse_command_line(argc, argv, cl), *cfg);
-  if (cfg->count("config")) {
-    ifstream ini_f((*cfg)["config"].as<string>().c_str());
-    po::store(po::parse_config_file(ini_f, ini), *cfg);
-  }
-  po::notify(*cfg);
-  if (!cfg->count("decoder_config")) {
-    cerr << cl << endl;
-    return false;
-  }
-  if (cfg->count("hstreaming") && (*cfg)["output"].as<string>() != "-") {
-    cerr << "When using 'hstreaming' the 'output' param should be '-'." << endl;
-    return false;
-  }
-#ifdef DTRAIN_LOCAL
-  if ((*cfg)["input"].as<string>() == "-") {
-    cerr << "Can't use stdin as input with this binary. Recompile without DTRAIN_LOCAL" << endl;
-    return false;
-  }
-#endif
-  if ((*cfg)["sample_from"].as<string>() != "kbest"
-       && (*cfg)["sample_from"].as<string>() != "forest") {
-    cerr << "Wrong 'sample_from' param: '" << (*cfg)["sample_from"].as<string>() << "', use 'kbest' or 'forest'." << endl;
-    return false;
-  }
-  if ((*cfg)["sample_from"].as<string>() == "kbest" && (*cfg)["filter"].as<string>() != "uniq" &&
-        (*cfg)["filter"].as<string>() != "not") {
-    cerr << "Wrong 'filter' param: '" << (*cfg)["filter"].as<string>() << "', use 'uniq' or 'not'." << endl;
-    return false;
-  }
-  if ((*cfg)["pair_sampling"].as<string>() != "all" && (*cfg)["pair_sampling"].as<string>() != "XYX" &&
-        (*cfg)["pair_sampling"].as<string>() != "PRO") {
-    cerr << "Wrong 'pair_sampling' param: '" << (*cfg)["pair_sampling"].as<string>() << "'." << endl;
-    return false;
-  }
-  if(cfg->count("hi_lo") && (*cfg)["pair_sampling"].as<string>() != "XYX") {
-    cerr << "Warning: hi_lo only works with pair_sampling XYX." << endl;
-  }
-  if((*cfg)["hi_lo"].as<float>() > 0.5 || (*cfg)["hi_lo"].as<float>() < 0.01) {
-    cerr << "hi_lo must lie in [0.01, 0.5]" << endl;
-    return false;
-  }
-  if ((*cfg)["pair_threshold"].as<score_t>() < 0) {
-    cerr << "The threshold must be >= 0!" << endl;
-    return false;
-  }
-  if ((*cfg)["select_weights"].as<string>() != "last" && (*cfg)["select_weights"].as<string>() != "best" &&
-        (*cfg)["select_weights"].as<string>() != "avg" && (*cfg)["select_weights"].as<string>() != "VOID") {
-    cerr << "Wrong 'select_weights' param: '" << (*cfg)["select_weights"].as<string>() << "', use 'last' or 'best'." << endl;
-    return false;
-  }
-  return true;
-}
-
-int
-main(int argc, char** argv)
-{
-  // handle most parameters
-  po::variables_map cfg;
-  if (!dtrain_init(argc, argv, &cfg)) exit(1); // something is wrong
-  bool quiet = false;
-  if (cfg.count("quiet")) quiet = true;
-  bool verbose = false;
-  if (cfg.count("verbose")) verbose = true;
-  bool noup = false;
-  if (cfg.count("noup")) noup = true;
-  bool hstreaming = false;
-  string task_id;
-  if (cfg.count("hstreaming")) {
-    hstreaming = true;
-    quiet = true;
-    task_id = cfg["hstreaming"].as<string>();
-    cerr.precision(17);
-  }
-  bool rescale = false;
-  if (cfg.count("rescale")) rescale = true;
-  HSReporter rep(task_id);
-  bool keep = false;
-  if (cfg.count("keep")) keep = true;
-
-  const unsigned k = cfg["k"].as<unsigned>();
-  const unsigned N = cfg["N"].as<unsigned>();
-  const unsigned T = cfg["epochs"].as<unsigned>();
-  const unsigned stop_after = cfg["stop_after"].as<unsigned>();
-  const string filter_type = cfg["filter"].as<string>();
-  const string sample_from = cfg["sample_from"].as<string>();
-  const string pair_sampling = cfg["pair_sampling"].as<string>();
-  const score_t pair_threshold = cfg["pair_threshold"].as<score_t>();
-  const string select_weights = cfg["select_weights"].as<string>();
-  const float hi_lo = cfg["hi_lo"].as<float>();
-  const score_t approx_bleu_d = cfg["approx_bleu_d"].as<score_t>();
-  const unsigned max_pairs = cfg["max_pairs"].as<unsigned>();
-  weight_t loss_margin = cfg["loss_margin"].as<weight_t>();
-  if (loss_margin > 9998.) loss_margin = std::numeric_limits<float>::max();
-  bool scale_bleu_diff = false;
-  if (cfg.count("scale_bleu_diff")) scale_bleu_diff = true;
-  bool average = false;
-  if (select_weights == "avg")
-    average = true;
-  vector<string> print_weights;
-  if (cfg.count("print_weights"))
-    boost::split(print_weights, cfg["print_weights"].as<string>(), boost::is_any_of(" "));
-
-  // setup decoder
-  register_feature_functions();
-  SetSilent(true);
-  ReadFile ini_rf(cfg["decoder_config"].as<string>());
-  if (!quiet)
-    cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl;
-  Decoder decoder(ini_rf.stream());
-
-  // scoring metric/scorer
-  string scorer_str = cfg["scorer"].as<string>();
-  LocalScorer* scorer;
-  if (scorer_str == "bleu") {
-    scorer = dynamic_cast<BleuScorer*>(new BleuScorer);
-  } else if (scorer_str == "stupid_bleu") {
-    scorer = dynamic_cast<StupidBleuScorer*>(new StupidBleuScorer);
-  } else if (scorer_str == "smooth_bleu") {
-    scorer = dynamic_cast<SmoothBleuScorer*>(new SmoothBleuScorer);
-  } else if (scorer_str == "sum_bleu") {
-    scorer = dynamic_cast<SumBleuScorer*>(new SumBleuScorer);
-  } else if (scorer_str == "sumexp_bleu") {
-    scorer = dynamic_cast<SumExpBleuScorer*>(new SumExpBleuScorer);
-  } else if (scorer_str == "sumwhatever_bleu") {
-    scorer = dynamic_cast<SumWhateverBleuScorer*>(new SumWhateverBleuScorer);
-  } else if (scorer_str == "approx_bleu") {
-    scorer = dynamic_cast<ApproxBleuScorer*>(new ApproxBleuScorer(N, approx_bleu_d));
-  } else if (scorer_str == "lc_bleu") {
-    scorer = dynamic_cast<LinearBleuScorer*>(new LinearBleuScorer(N));
-  } else {
-    cerr << "Don't know scoring metric: '" << scorer_str << "', exiting." << endl;
-    exit(1);
-  }
-  vector<score_t> bleu_weights;
-  scorer->Init(N, bleu_weights);
-
-  // setup decoder observer
-  MT19937 rng; // random number generator, only for forest sampling
-  HypSampler* observer;
-  if (sample_from == "kbest")
-    observer = dynamic_cast<KBestGetter*>(new KBestGetter(k, filter_type));
-  else
-    observer = dynamic_cast<KSampler*>(new KSampler(k, &rng));
-  observer->SetScorer(scorer);
-
-  // init weights
-  vector<weight_t>& dense_weights = decoder.CurrentWeightVector();
-  SparseVector<weight_t> lambdas, cumulative_penalties, w_average;
-  if (cfg.count("input_weights")) Weights::InitFromFile(cfg["input_weights"].as<string>(), &dense_weights);
-  Weights::InitSparseVector(dense_weights, &lambdas);
-
-  // meta params for perceptron, SVM
-  weight_t eta = cfg["learning_rate"].as<weight_t>();
-  weight_t gamma = cfg["gamma"].as<weight_t>();
-
-  // l1 regularization
-  bool l1naive = false;
-  bool l1clip = false;
-  bool l1cumul = false;
-  weight_t l1_reg = 0;
-  if (cfg["l1_reg"].as<string>() != "none") {
-    string s = cfg["l1_reg"].as<string>();
-    if (s == "naive") l1naive = true;
-    else if (s == "clip") l1clip = true;
-    else if (s == "cumul") l1cumul = true;
-    l1_reg = cfg["l1_reg_strength"].as<weight_t>();
-  }
-
-  // output
-  string output_fn = cfg["output"].as<string>();
-  // input
-  string input_fn = cfg["input"].as<string>();
-  ReadFile input(input_fn);
-  // buffer input for t > 0
-  vector<string> src_str_buf;          // source strings (decoder takes only strings)
-  vector<vector<WordID> > ref_ids_buf; // references as WordID vecs
-  // where temp files go
-  string tmp_path = cfg["tmp"].as<string>();
-#ifdef DTRAIN_LOCAL
-  string refs_fn = cfg["refs"].as<string>();
-  ReadFile refs(refs_fn);
-#else
-  string grammar_buf_fn = gettmpf(tmp_path, "dtrain-grammars");
-  ogzstream grammar_buf_out;
-  grammar_buf_out.open(grammar_buf_fn.c_str());
-#endif
-
-  unsigned in_sz = std::numeric_limits<unsigned>::max(); // input index, input size
-  vector<pair<score_t, score_t> > all_scores;
-  score_t max_score = 0.;
-  unsigned best_it = 0;
-  float overall_time = 0.;
-
-  // output cfg
-  if (!quiet) {
-    cerr << _p5;
-    cerr << endl << "dtrain" << endl << "Parameters:" << endl;
-    cerr << setw(25) << "k " << k << endl;
-    cerr << setw(25) << "N " << N << endl;
-    cerr << setw(25) << "T " << T << endl;
-    cerr << setw(25) << "scorer '" << scorer_str << "'" << endl;
-    if (scorer_str == "approx_bleu")
-      cerr << setw(25) << "approx. B discount " << approx_bleu_d << endl;
-    cerr << setw(25) << "sample from " << "'" << sample_from << "'" << endl;
-    if (sample_from == "kbest")
-      cerr << setw(25) << "filter " << "'" << filter_type << "'" << endl;
-    if (!scale_bleu_diff) cerr << setw(25) << "learning rate " << eta << endl;
-    else cerr << setw(25) << "learning rate " << "bleu diff" << endl;
-    cerr << setw(25) << "gamma " << gamma << endl;
-    cerr << setw(25) << "loss margin " << loss_margin << endl;
-    cerr << setw(25) << "pairs " << "'" << pair_sampling << "'" << endl;
-    if (pair_sampling == "XYX")
-      cerr << setw(25) << "hi lo " << hi_lo << endl;
-    cerr << setw(25) << "pair threshold " << pair_threshold << endl;
-    cerr << setw(25) << "select weights " << "'" << select_weights << "'" << endl;
-    if (cfg.count("l1_reg"))
-      cerr << setw(25) << "l1 reg " << l1_reg << " '" << cfg["l1_reg"].as<string>() << "'" << endl;
-    if (rescale)
-      cerr << setw(25) << "rescale " << rescale << endl;
-    cerr << setw(25) << "max pairs " << max_pairs << endl;
-    cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl;
-    cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
-#ifdef DTRAIN_LOCAL
-    cerr << setw(25) << "refs " << "'" << refs_fn << "'" << endl;
-#endif
-    cerr << setw(25) << "output " << "'" << output_fn << "'" << endl;
-    if (cfg.count("input_weights"))
-      cerr << setw(25) << "weights in " << "'" << cfg["input_weights"].as<string>() << "'" << endl;
-    if (stop_after > 0)
-      cerr << setw(25) << "stop_after " << stop_after << endl;
-    if (!verbose) cerr << "(a dot represents " << DTRAIN_DOTS << " inputs)" << endl;
-  }
-
-
-  for (unsigned t = 0; t < T; t++) // T epochs
-  {
-
-  if (hstreaming) cerr << "reporter:status:Iteration #" << t+1 << " of " << T << endl;
-
-  time_t start, end;
-  time(&start);
-#ifndef DTRAIN_LOCAL
-  igzstream grammar_buf_in;
-  if (t > 0) grammar_buf_in.open(grammar_buf_fn.c_str());
-#endif
-  score_t score_sum = 0.;
-  score_t model_sum(0);
-  unsigned ii = 0, rank_errors = 0, margin_violations = 0, npairs = 0, f_count = 0, list_sz = 0;
-  if (!quiet) cerr << "Iteration #" << t+1 << " of " << T << "." << endl;
-
-  while(true)
-  {
-
-    string in;
-    bool next = false, stop = false; // next iteration or premature stop
-    if (t == 0) {
-      if(!getline(*input, in)) next = true;
-    } else {
-      if (ii == in_sz) next = true; // stop if we reach the end of our input
-    }
-    // stop after X sentences (but still go on for those)
-    if (stop_after > 0 && stop_after == ii && !next) stop = true;
-
-    // produce some pretty output
-    if (!quiet && !verbose) {
-      if (ii == 0) cerr << " ";
-      if ((ii+1) % (DTRAIN_DOTS) == 0) {
-        cerr << ".";
-        cerr.flush();
-      }
-      if ((ii+1) % (20*DTRAIN_DOTS) == 0) {
-        cerr << " " << ii+1 << endl;
-        if (!next && !stop) cerr << " ";
-      }
-      if (stop) {
-        if (ii % (20*DTRAIN_DOTS) != 0) cerr << " " << ii << endl;
-        cerr << "Stopping after " << stop_after << " input sentences." << endl;
-      } else {
-        if (next) {
-          if (ii % (20*DTRAIN_DOTS) != 0) cerr << " " << ii << endl;
-        }
-      }
-    }
-
-    // next iteration
-    if (next || stop) break;
-
-    // weights
-    lambdas.init_vector(&dense_weights);
-
-    // getting input
-    vector<WordID> ref_ids; // reference as vector<WordID>
-#ifndef DTRAIN_LOCAL
-    vector<string> in_split; // input: sid\tsrc\tref\tpsg
-    if (t == 0) {
-      // handling input
-      split_in(in, in_split);
-      if (hstreaming && ii == 0) cerr << "reporter:counter:" << task_id << ",First ID," << in_split[0] << endl;
-      // getting reference
-      vector<string> ref_tok;
-      boost::split(ref_tok, in_split[2], boost::is_any_of(" "));
-      register_and_convert(ref_tok, ref_ids);
-      ref_ids_buf.push_back(ref_ids);
-      // process and set grammar
-      bool broken_grammar = true; // ignore broken grammars
-      for (string::iterator it = in.begin(); it != in.end(); it++) {
-        if (!isspace(*it)) {
-          broken_grammar = false;
-          break;
-        }
-      }
-      if (broken_grammar) {
-        cerr << "Broken grammar for " << ii+1 << "! Ignoring this input." << endl;
-        continue;
-      }
-      boost::replace_all(in, "\t", "\n");
-      in += "\n";
-      grammar_buf_out << in << DTRAIN_GRAMMAR_DELIM << " " << in_split[0] << endl;
-      decoder.AddSupplementalGrammarFromString(in);
-      src_str_buf.push_back(in_split[1]);
-      // decode
-      observer->SetRef(ref_ids);
-      decoder.Decode(in_split[1], observer);
-    } else {
-      // get buffered grammar
-      string grammar_str;
-      while (true) {
-        string rule;
-        getline(grammar_buf_in, rule);
-        if (boost::starts_with(rule, DTRAIN_GRAMMAR_DELIM)) break;
-        grammar_str += rule + "\n";
-      }
-      decoder.AddSupplementalGrammarFromString(grammar_str);
-      // decode
-      observer->SetRef(ref_ids_buf[ii]);
-      decoder.Decode(src_str_buf[ii], observer);
-    }
-#else
-    if (t == 0) {
-      string r_;
-      getline(*refs, r_);
-      vector<string> ref_tok;
-      boost::split(ref_tok, r_, boost::is_any_of(" "));
-      register_and_convert(ref_tok, ref_ids);
-      ref_ids_buf.push_back(ref_ids);
-      src_str_buf.push_back(in);
-    } else {
-      ref_ids = ref_ids_buf[ii];
-    }
-    observer->SetRef(ref_ids);
-    if (t == 0)
-      decoder.Decode(in, observer);
-    else
-      decoder.Decode(src_str_buf[ii], observer);
-#endif
-
-    // get (scored) samples
-    vector<ScoredHyp>* samples = observer->GetSamples();
-
-    if (verbose) {
-      cerr << "--- ref for " << ii << ": ";
-      if (t > 0) printWordIDVec(ref_ids_buf[ii]);
-      else printWordIDVec(ref_ids);
-      cerr << endl;
-      for (unsigned u = 0; u < samples->size(); u++) {
-        cerr << _p2 << _np << "[" << u << ". '";
-        printWordIDVec((*samples)[u].w);
-        cerr << "'" << endl;
-        cerr << "SCORE=" << (*samples)[u].score << ",model="<< (*samples)[u].model << endl;
-        cerr << "F{" << (*samples)[u].f << "} ]" << endl << endl;
-      }
-    }
-
-    score_sum += (*samples)[0].score; // stats for 1best
-    model_sum += (*samples)[0].model;
-
-    f_count += observer->get_f_count();
-    list_sz += observer->get_sz();
-
-    // weight updates
-    if (!noup) {
-      // get pairs
-      vector<pair<ScoredHyp,ScoredHyp> > pairs;
-      if (pair_sampling == "all")
-        all_pairs(samples, pairs, pair_threshold, max_pairs);
-      if (pair_sampling == "XYX")
-        partXYX(samples, pairs, pair_threshold, max_pairs, hi_lo);
-      if (pair_sampling == "PRO")
-        PROsampling(samples, pairs, pair_threshold, max_pairs);
-      npairs += pairs.size();
-
-      for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
-           it != pairs.end(); it++) {
-#ifdef DTRAIN_FASTER_PERCEPTRON
-        bool rank_error = true; // pair sampling already did this for us
-        rank_errors++;
-        score_t margin = std::numeric_limits<float>::max();
-#else
-        bool rank_error = it->first.model <= it->second.model;
-        if (rank_error) rank_errors++;
-        score_t margin = fabs(fabs(it->first.model) - fabs(it->second.model));
-        if (!rank_error && margin < loss_margin) margin_violations++;
-#endif
-        if (scale_bleu_diff) eta = it->first.score - it->second.score;
-        if (rank_error || margin < loss_margin) {
-          SparseVector<weight_t> diff_vec = it->first.f - it->second.f;
-          lambdas.plus_eq_v_times_s(diff_vec, eta);
-          if (gamma)
-            lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs));
-        }
-      }
-
-      // l1 regularization
-      if (l1naive) {
-        for (unsigned d = 0; d < lambdas.size(); d++) {
-          weight_t v = lambdas.get(d);
-          lambdas.set_value(d, v - sign(v) * l1_reg);
-        }
-      } else if (l1clip) {
-        for (unsigned d = 0; d < lambdas.size(); d++) {
-          if (lambdas.nonzero(d)) {
-            weight_t v = lambdas.get(d);
-            if (v > 0) {
-              lambdas.set_value(d, max(0., v - l1_reg));
-            } else {
-              lambdas.set_value(d, min(0., v + l1_reg));
-            }
-          }
-        }
-      } else if (l1cumul) {
-        weight_t acc_penalty = (ii+1) * l1_reg; // ii is the index of the current input
-        for (unsigned d = 0; d < lambdas.size(); d++) {
-          if (lambdas.nonzero(d)) {
-            weight_t v = lambdas.get(d);
-            weight_t penalty = 0;
-            if (v > 0) {
-              penalty = max(0., v-(acc_penalty + cumulative_penalties.get(d)));
-            } else {
-              penalty = min(0., v+(acc_penalty - cumulative_penalties.get(d)));
-            }
-            lambdas.set_value(d, penalty);
-            cumulative_penalties.set_value(d, cumulative_penalties.get(d)+penalty);
-          }
-        }
-      }
-
-    }
-
-    if (rescale) lambdas /= lambdas.l2norm();
-
-    ++ii;
-
-    if (hstreaming) {
-      rep.update_counter("Seen #"+boost::lexical_cast<string>(t+1), 1u);
-      rep.update_counter("Seen", 1u);
-    }
-
-  } // input loop
-
-  if (average) w_average += lambdas;
-
-  if (scorer_str == "approx_bleu" || scorer_str == "lc_bleu") scorer->Reset();
-
-  if (t == 0) {
-    in_sz = ii; // remember size of input (# lines)
-    if (hstreaming) {
-      rep.update_counter("|Input|", ii);
-      rep.update_gcounter("|Input|", ii);
-      rep.update_gcounter("Shards", 1u);
-    }
-  }
-
-#ifndef DTRAIN_LOCAL
-  if (t == 0) {
-    grammar_buf_out.close();
-  } else {
-    grammar_buf_in.close();
-  }
-#endif
-
-  // print some stats
-  score_t score_avg = score_sum/(score_t)in_sz;
-  score_t model_avg = model_sum/(score_t)in_sz;
-  score_t score_diff, model_diff;
-  if (t > 0) {
-    score_diff = score_avg - all_scores[t-1].first;
-    model_diff = model_avg - all_scores[t-1].second;
-  } else {
-    score_diff = score_avg;
-    model_diff = model_avg;
-  }
-
-  unsigned nonz = 0;
-  if (!quiet || hstreaming) nonz = (unsigned)lambdas.num_nonzero();
-
-  if (!quiet) {
-    cerr << _p5 << _p << "WEIGHTS" << endl;
-    for (vector<string>::iterator it = print_weights.begin(); it != print_weights.end(); it++) {
-      cerr << setw(18) << *it << " = " << lambdas.get(FD::Convert(*it)) << endl;
-    }
-    cerr << "        ---" << endl;
-    cerr << _np << "       1best avg score: " << score_avg;
-    cerr << _p << " (" << score_diff << ")" << endl;
-    cerr << _np << " 1best avg model score: " << model_avg;
-    cerr << _p << " (" << model_diff << ")" << endl;
-    cerr << "           avg # pairs: ";
-    cerr << _np << npairs/(float)in_sz << endl;
-    cerr << "        avg # rank err: ";
-    cerr << rank_errors/(float)in_sz << endl;
-#ifndef DTRAIN_FASTER_PERCEPTRON
-    cerr << "     avg # margin viol: ";
-    cerr << margin_violations/(float)in_sz << endl;
-#endif
-    cerr << "    non0 feature count: " <<  nonz << endl;
-    cerr << "           avg list sz: " << list_sz/(float)in_sz << endl;
-    cerr << "           avg f count: " << f_count/(float)list_sz << endl;
-  }
-
-  if (hstreaming) {
-    rep.update_counter("Score 1best avg #"+boost::lexical_cast<string>(t+1), (unsigned)(score_avg*DTRAIN_SCALE));
-    rep.update_counter("Model 1best avg #"+boost::lexical_cast<string>(t+1), (unsigned)(model_avg*DTRAIN_SCALE));
-    rep.update_counter("Pairs avg #"+boost::lexical_cast<string>(t+1), (unsigned)((npairs/(weight_t)in_sz)*DTRAIN_SCALE));
-    rep.update_counter("Rank errors avg #"+boost::lexical_cast<string>(t+1), (unsigned)((rank_errors/(weight_t)in_sz)*DTRAIN_SCALE));
-    rep.update_counter("Margin violations avg #"+boost::lexical_cast<string>(t+1), (unsigned)((margin_violations/(weight_t)in_sz)*DTRAIN_SCALE));
-    rep.update_counter("Non zero feature count #"+boost::lexical_cast<string>(t+1), nonz);
-    rep.update_gcounter("Non zero feature count #"+boost::lexical_cast<string>(t+1), nonz);
-  }
-
-  pair<score_t,score_t> remember;
-  remember.first = score_avg;
-  remember.second = model_avg;
-  all_scores.push_back(remember);
-  if (score_avg > max_score) {
-    max_score = score_avg;
-    best_it = t;
-  }
-  time (&end);
-  float time_diff = difftime(end, start);
-  overall_time += time_diff;
-  if (!quiet) {
-    cerr << _p2 << _np << "(time " << time_diff/60. << " min, ";
-    cerr << time_diff/in_sz << " s/S)" << endl;
-  }
-  if (t+1 != T && !quiet) cerr << endl;
-
-  if (noup) break;
-
-  // write weights to file
-  if (select_weights == "best" || keep) {
-    lambdas.init_vector(&dense_weights);
-    string w_fn = "weights." + boost::lexical_cast<string>(t) + ".gz";
-    Weights::WriteToFile(w_fn, dense_weights, true);
-  }
-
-  } // outer loop
-
-  if (average) w_average /= (weight_t)T;
-
-#ifndef DTRAIN_LOCAL
-  unlink(grammar_buf_fn.c_str());
-#endif
-
-  if (!noup) {
-    if (!quiet) cerr << endl << "Writing weights file to '" << output_fn << "' ..." << endl;
-    if (select_weights == "last" || average) { // last, average
-      WriteFile of(output_fn); // works with '-'
-      ostream& o = *of.stream();
-      o.precision(17);
-      o << _np;
-      if (average) {
-        for (SparseVector<weight_t>::iterator it = w_average.begin(); it != w_average.end(); ++it) {
-          if (it->second == 0) continue;
-          o << FD::Convert(it->first) << '\t' << it->second << endl;
-        }
-      } else {
-        for (SparseVector<weight_t>::iterator it = lambdas.begin(); it != lambdas.end(); ++it) {
-          if (it->second == 0) continue;
-          o << FD::Convert(it->first) << '\t' << it->second << endl;
-        }
-      }
-    } else if (select_weights == "VOID") { // do nothing with the weights
-    } else { // best
-      if (output_fn != "-") {
-        CopyFile("weights."+boost::lexical_cast<string>(best_it)+".gz", output_fn);
-      } else {
-        ReadFile bestw("weights."+boost::lexical_cast<string>(best_it)+".gz");
-        string o;
-        cout.precision(17);
-        cout << _np;
-        while(getline(*bestw, o)) cout << o << endl;
-      }
-      if (!keep) {
-        for (unsigned i = 0; i < T; i++) {
-          string s = "weights." + boost::lexical_cast<string>(i) + ".gz";
-          unlink(s.c_str());
-        }
-      }
-    }
-    if (output_fn == "-" && hstreaming) cout << "__SHARD_COUNT__\t1" << endl;
-    if (!quiet) cerr << "done" << endl;
-  }
-
-  if (!quiet) {
-    cerr << _p5 << _np << endl << "---" << endl << "Best iteration: ";
-    cerr << best_it+1 << " [SCORE '" << scorer_str << "'=" << max_score << "]." << endl;
-    cerr << "This took " << overall_time/60. << " min." << endl;
-  }
-}
-
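The heart of the inner loop above is the pairwise update: for a sampled pair whose first hypothesis has the better BLEU, the weights move along the feature difference whenever the model misranks the pair, or ranks it correctly by less than `loss_margin`; a nonzero `gamma` adds the SVM-style shrinkage term, and the `l1_reg` variants (naive, clip, cumul) appear to follow the cumulative-penalty scheme of Tsuruoka et al. (ACL 2009), despite the 'Tsuroka (2010)' spelling in the option string. A condensed sketch of just the update, with a plain `std::map` standing in for cdec's `SparseVector<weight_t>` (not a drop-in replacement for the code above):

```
#include <cmath>
#include <map>
#include <string>

typedef std::map<std::string, double> SparseW; // stands in for SparseVector<weight_t>

// One pairwise update: (f_hi, model_hi) belongs to the hypothesis with the
// better BLEU. Update on a rank error, or on a correct ranking whose model
// score gap is below loss_margin.
void pair_update(SparseW& lambdas, const SparseW& f_hi, const SparseW& f_lo,
                 double model_hi, double model_lo,
                 double eta, double gamma, double loss_margin, unsigned npairs)
{
  bool rank_error = model_hi <= model_lo;
  double margin = std::fabs(std::fabs(model_hi) - std::fabs(model_lo));
  if (rank_error || margin < loss_margin) {
    for (SparseW::const_iterator it = f_hi.begin(); it != f_hi.end(); ++it)
      lambdas[it->first] += eta * it->second;   // lambdas += eta * f(hi)
    for (SparseW::const_iterator it = f_lo.begin(); it != f_lo.end(); ++it)
      lambdas[it->first] -= eta * it->second;   // lambdas -= eta * f(lo)
    if (gamma != 0.0)                           // SVM-style shrinkage term
      for (SparseW::iterator it = lambdas.begin(); it != lambdas.end(); ++it)
        it->second += -2.0 * gamma * eta * (1.0 / npairs) * it->second;
  }
}
```

With `scale_bleu_diff` set, `eta` is not a constant but the BLEU difference of the misranked pair, as the code above shows.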
diff --git a/dtrain/dtrain.h b/dtrain/dtrain.h
deleted file mode 100644
index 7e084a79..00000000
--- a/dtrain/dtrain.h
+++ /dev/null
@@ -1,98 +0,0 @@
-#ifndef _DTRAIN_H_
-#define _DTRAIN_H_
-
-#undef DTRAIN_FASTER_PERCEPTRON // only look at misranked pairs
-                                 // DO NOT USE WITH SVM!
-#define DTRAIN_LOCAL
-#define DTRAIN_DOTS 10 // after how many inputs to display a '.'
-#define DTRAIN_GRAMMAR_DELIM "########EOS########"
-#define DTRAIN_SCALE 100000
-
-
-#include <iomanip>
-#include <climits>
-#include <string.h>
-
-#include <boost/algorithm/string.hpp>
-#include <boost/program_options.hpp>
-
-#include "ksampler.h"
-#include "pairsampling.h"
-
-#include "filelib.h"
-
-
-
-using namespace std;
-using namespace dtrain;
-namespace po = boost::program_options;
-
-inline void register_and_convert(const vector<string>& strs, vector<WordID>& ids)
-{
-  vector<string>::const_iterator it;
-  for (it = strs.begin(); it < strs.end(); it++)
-    ids.push_back(TD::Convert(*it));
-}
-
-inline string gettmpf(const string path, const string infix)
-{
-  char fn[path.size() + infix.size() + 8];
-  strcpy(fn, path.c_str());
-  strcat(fn, "/");
-  strcat(fn, infix.c_str());
-  strcat(fn, "-XXXXXX");
-  if (!mkstemp(fn)) {
-    cerr << "Cannot make temp file in" << path << " , exiting." << endl;
-    exit(1);
-  }
-  return string(fn);
-}
-
-inline void split_in(string& s, vector<string>& parts)
-{
-  unsigned f = 0;
-  for(unsigned i = 0; i < 3; i++) {
-    unsigned e = f;
-    f = s.find("\t", f+1);
-    if (e != 0) parts.push_back(s.substr(e+1, f-e-1));
-    else parts.push_back(s.substr(0, f));
-  }
-  s.erase(0, f+1);
-}
-
-struct HSReporter
-{
-  string task_id_;
-
-  HSReporter(string task_id) : task_id_(task_id) {}
-
-  inline void update_counter(string name, unsigned amount) {
-    cerr << "reporter:counter:" << task_id_ << "," << name << "," << amount << endl;
-  }
-  inline void update_gcounter(string name, unsigned amount) {
-    cerr << "reporter:counter:Global," << name << "," << amount << endl;
-  }
-};
-
-inline ostream& _np(ostream& out) { return out << resetiosflags(ios::showpos); }
-inline ostream& _p(ostream& out)  { return out << setiosflags(ios::showpos); }
-inline ostream& _p2(ostream& out) { return out << setprecision(2); }
-inline ostream& _p5(ostream& out) { return out << setprecision(5); }
-
-inline void printWordIDVec(vector<WordID>& v)
-{
-  for (unsigned i = 0; i < v.size(); i++) {
-    cerr << TD::Convert(v[i]);
-    if (i < v.size()-1) cerr << " ";
-  }
-}
-
-template<typename T>
-inline T sign(T z)
-{
-  if (z == 0) return 0;
-  return z < 0 ? -1 : +1;
-}
-
-#endif
-
diff --git a/dtrain/hstreaming/avg.rb b/dtrain/hstreaming/avg.rb
deleted file mode 100755
index 2599c732..00000000
--- a/dtrain/hstreaming/avg.rb
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env ruby
-# first arg may be an int of custom shard count
-
-shard_count_key = "__SHARD_COUNT__"
-
-STDIN.set_encoding 'utf-8'
-STDOUT.set_encoding 'utf-8'
-
-w = {}
-c = {}
-w.default = 0
-c.default = 0
-while line = STDIN.gets
-  key, val = line.split /\s/
-  w[key] += val.to_f
-  c[key] += 1
-end
-
-if ARGV.size == 0
-  shard_count = w["__SHARD_COUNT__"]
-else
-  shard_count = ARGV[0].to_f
-end
-w.each_key { |k|
-  if k == shard_count_key
-    next
-  else
-    puts "#{k}\t#{w[k]/shard_count}"
-    #puts "# #{c[k]}"
-  end
-}
-
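avg.rb is the simplest possible reducer: it sums each feature over all shard outputs and divides by the shard count, which is taken from the `__SHARD_COUNT__` key unless given as the first argument. An equivalent C++ sketch (not part of the repository), assuming the same whitespace-separated key/value input on stdin:

```
// Average feature weights emitted by several dtrain shards.
#include <cstdlib>
#include <iostream>
#include <map>
#include <string>

int main(int argc, char** argv)
{
  std::map<std::string, double> w;
  std::string key; double val;
  while (std::cin >> key >> val) w[key] += val; // sum per feature
  double shard_count = (argc > 1) ? std::atof(argv[1]) : w["__SHARD_COUNT__"];
  for (std::map<std::string, double>::iterator it = w.begin(); it != w.end(); ++it)
    if (it->first != "__SHARD_COUNT__")
      std::cout << it->first << '\t' << it->second / shard_count << '\n';
}
```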
diff --git a/dtrain/hstreaming/cdec.ini b/dtrain/hstreaming/cdec.ini
deleted file mode 100644
index d4f5cecd..00000000
--- a/dtrain/hstreaming/cdec.ini
+++ /dev/null
@@ -1,22 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
-scfg_max_span_limit=15
-intersection_strategy=cube_pruning
-cubepruning_pop_limit=30
-feature_function=WordPenalty
-feature_function=KLanguageModel nc-wmt11.en.srilm.gz
-#feature_function=ArityPenalty
-#feature_function=CMR2008ReorderingFeatures
-#feature_function=Dwarf
-#feature_function=InputIndicator
-#feature_function=LexNullJump
-#feature_function=NewJump
-#feature_function=NgramFeatures
-#feature_function=NonLatinCount
-#feature_function=OutputIndicator
-#feature_function=RuleIdentityFeatures
-#feature_function=RuleNgramFeatures
-#feature_function=RuleShape
-#feature_function=SourceSpanSizeFeatures
-#feature_function=SourceWordPenalty
-#feature_function=SpanFeatures
diff --git a/dtrain/hstreaming/dtrain.ini b/dtrain/hstreaming/dtrain.ini
deleted file mode 100644
index a2c219a1..00000000
--- a/dtrain/hstreaming/dtrain.ini
+++ /dev/null
@@ -1,15 +0,0 @@
-input=-
-output=-
-decoder_config=cdec.ini
-tmp=/var/hadoop/mapred/local/
-epochs=1
-k=100
-N=4
-learning_rate=0.0001
-gamma=0
-scorer=stupid_bleu
-sample_from=kbest
-filter=uniq
-pair_sampling=XYX
-pair_threshold=0
-select_weights=last
diff --git a/dtrain/hstreaming/dtrain.sh b/dtrain/hstreaming/dtrain.sh
deleted file mode 100755
index 877ff94c..00000000
--- a/dtrain/hstreaming/dtrain.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-# script to run dtrain with a task id
-
-pushd . &>/dev/null
-cd ..
-ID=$(basename $(pwd)) # attempt_...
-popd &>/dev/null
-./dtrain -c dtrain.ini --hstreaming $ID
-
diff --git a/dtrain/hstreaming/hadoop-streaming-job.sh b/dtrain/hstreaming/hadoop-streaming-job.sh
deleted file mode 100755
index 92419956..00000000
--- a/dtrain/hstreaming/hadoop-streaming-job.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-
-EXP=a_simple_test
-
-# change these vars to fit your hadoop installation
-HADOOP_HOME=/usr/lib/hadoop-0.20
-JAR=contrib/streaming/hadoop-streaming-0.20.2-cdh3u1.jar
-HSTREAMING="$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/$JAR"
-
- IN=input_on_hdfs
-OUT=output_weights_on_hdfs
-
-# you can set -reducer to NONE if you want to
-# do feature selection/averaging locally (e.g. to
-# keep weights of all epochs)
-$HSTREAMING \
-    -mapper "dtrain.sh" \
-    -reducer "ruby lplp.rb l2 select_k 100000" \
-    -input $IN \
-    -output $OUT \
-    -file dtrain.sh \
-    -file lplp.rb \
-    -file ../dtrain \
-    -file dtrain.ini \
-    -file cdec.ini \
-    -file ../test/example/nc-wmt11.en.srilm.gz \
-    -jobconf mapred.reduce.tasks=30 \
-    -jobconf mapred.max.map.failures.percent=0 \
-    -jobconf mapred.job.name="dtrain $EXP"
-
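Taken together, the three files above form the streaming job: each mapper runs dtrain.sh, and thus one dtrain shard, on its slice of the input and emits that shard's weights to stdout, while the reducer (lplp.rb, next) selects and averages features across shards. As the comment in the job script notes, setting -reducer to NONE keeps the raw per-shard weights so that selection or averaging can instead be done locally.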
diff --git a/dtrain/hstreaming/lplp.rb b/dtrain/hstreaming/lplp.rb
deleted file mode 100755
index f0cd58c5..00000000
--- a/dtrain/hstreaming/lplp.rb
+++ /dev/null
@@ -1,131 +0,0 @@
-# lplp.rb
-
-# norms
-def l0(feature_column, n)
-  if feature_column.size >= n then return 1 else return 0 end
-end
-
-def l1(feature_column, n=-1)
-  return feature_column.map { |i| i.abs }.reduce { |sum,i| sum+i }
-end
-
-def l2(feature_column, n=-1)
-  return Math.sqrt feature_column.map { |i| i.abs2 }.reduce { |sum,i| sum+i }
-end
-
-def linfty(feature_column, n=-1)
-  return feature_column.map { |i| i.abs }.max
-end
-
-# stats
-def median(feature_column, n)
-  return feature_column.concat(0.step(n-feature_column.size-1).map{|i|0}).sort[feature_column.size/2]
-end
-
-def mean(feature_column, n)
-  return feature_column.reduce { |sum, i| sum+i } / n
-end
-
-# selection
-def select_k(weights, norm_fun, n, k=10000)
-  weights.sort{|a,b| norm_fun.call(b[1], n) <=> norm_fun.call(a[1], n)}.each { |p|
-    puts "#{p[0]}\t#{mean(p[1], n)}"
-    k -= 1
-    if k == 0 then break end
-  }
-end
-
-def cut(weights, norm_fun, n, epsilon=0.0001)
-  weights.each { |k,v|
-    if norm_fun.call(v, n).abs >= epsilon
-      puts "#{k}\t#{mean(v, n)}"
-    end
-  }
-end
-
-# test
-def _test()
-  puts
-  w = {}
-  w["a"] = [1, 2, 3]
-  w["b"] = [1, 2]
-  w["c"] = [66]
-  w["d"] = [10, 20, 30]
-  n = 3
-  puts w.to_s
-  puts
-  puts "select_k"
-  puts "l0 expect ad"
-  select_k(w, method(:l0), n, 2)
-  puts "l1 expect cd"
-  select_k(w, method(:l1), n, 2)
-  puts "l2 expect c"
-  select_k(w, method(:l2), n, 1)
-  puts
-  puts "cut"
-  puts "l1 expect cd"
-  cut(w, method(:l1), n, 7)
-  puts
-  puts "median"
-  a = [1,2,3,4,5]
-  puts a.to_s
-  puts median(a, 5)
-  puts
-  puts "#{median(a, 7)} <- that's because we add missing 0s:"
-  puts a.concat(0.step(7-a.size-1).map{|i|0}).to_s
-  puts
-  puts "mean expect bc"
-  w.clear
-  w["a"] = [2]
-  w["b"] = [2.1]
-  w["c"] = [2.2]
-  cut(w, method(:mean), 1, 2.05)
- exit
-end
-#_test()
-
-# actually do something
-def usage()
-  puts "lplp.rb <l0,l1,l2,linfty,mean,median> <cut|select_k> <k|threshold> [n] < <input>"
-  puts "   l0...: norms for selection"
-  puts "select_k: only output top k (according to the norm of their column vector) features"
-  puts "     cut: output features with weight >= threshold"
-  puts "       n: if we do not have a shard count use this number for averaging"
-  exit
-end
-
-if ARGV.size < 3 then usage end
-norm_fun = method(ARGV[0].to_sym)
-type = ARGV[1]
-x = ARGV[2].to_f
-
-shard_count_key = "__SHARD_COUNT__"
-
-STDIN.set_encoding 'utf-8'
-STDOUT.set_encoding 'utf-8'
-
-w = {}
-shard_count = 0
-while line = STDIN.gets
-  key, val = line.split /\s+/
-  if key == shard_count_key
-    shard_count += 1
-    next
-  end
-  if w.has_key? key
-    w[key].push val.to_f
-  else
-    w[key] = [val.to_f]
-  end
-end
-
-if ARGV.size == 4 then shard_count = ARGV[3].to_f end
-
-if type == 'cut'
-  cut(w, norm_fun, shard_count, x)
-elsif type == 'select_k'
-  select_k(w, norm_fun, shard_count, x)
-else
-  puts "oh oh"
-end
-
diff --git a/dtrain/hstreaming/red-test b/dtrain/hstreaming/red-test
deleted file mode 100644
index 2623d697..00000000
--- a/dtrain/hstreaming/red-test
+++ /dev/null
@@ -1,9 +0,0 @@
-a	1
-b	2
-c	3.5
-a	1
-b	2
-c	3.5
-d	1
-e	2
-__SHARD_COUNT__	2
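red-test appears intended as a tiny input for checking a reducer by hand: it merges the output of two shards (hence `__SHARD_COUNT__` = 2). Piping it through avg.rb should yield a=1, b=2, c=3.5, d=0.5, and e=1; note that d and e occur on only one shard, so dividing by the full shard count halves them, which is exactly the behaviour of lplp.rb's `mean` with n set to the shard count.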
diff --git a/dtrain/kbestget.h b/dtrain/kbestget.h
deleted file mode 100644
index dd8882e1..00000000
--- a/dtrain/kbestget.h
+++ /dev/null
@@ -1,152 +0,0 @@
-#ifndef _DTRAIN_KBESTGET_H_
-#define _DTRAIN_KBESTGET_H_
-
-#include "kbest.h" // cdec
-#include "sentence_metadata.h"
-
-#include "verbose.h"
-#include "viterbi.h"
-#include "ff_register.h"
-#include "decoder.h"
-#include "weights.h"
-#include "logval.h"
-
-using namespace std;
-
-namespace dtrain
-{
-
-
-typedef double score_t;
-
-struct ScoredHyp
-{
-  vector<WordID> w;
-  SparseVector<double> f;
-  score_t model;
-  score_t score;
-  unsigned rank;
-};
-
-struct LocalScorer
-{
-  unsigned N_;
-  vector<score_t> w_;
-
-  virtual score_t
-  Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned src_len)=0;
-
-  void Reset() {} // only for approx bleu
-
-  inline void
-  Init(unsigned N, vector<score_t> weights)
-  {
-    assert(N > 0);
-    N_ = N;
-    if (weights.empty()) for (unsigned i = 0; i < N_; i++) w_.push_back(1./N_);
-    else w_ = weights;
-  }
-
-  inline score_t
-  brevity_penalty(const unsigned hyp_len, const unsigned ref_len)
-  {
-    if (hyp_len > ref_len) return 1;
-    return exp(1 - (score_t)ref_len/hyp_len);
-  }
-};
-
-struct HypSampler : public DecoderObserver
-{
-  LocalScorer* scorer_;
-  vector<WordID>* ref_;
-  unsigned f_count_, sz_;
-  virtual vector<ScoredHyp>* GetSamples()=0;
-  inline void SetScorer(LocalScorer* scorer) { scorer_ = scorer; }
-  inline void SetRef(vector<WordID>& ref) { ref_ = &ref; }
-  inline unsigned get_f_count() { return f_count_; }
-  inline unsigned get_sz() { return sz_; }
-};
-////////////////////////////////////////////////////////////////////////////////
-
-
-
-struct KBestGetter : public HypSampler
-{
-  const unsigned k_;
-  const string filter_type_;
-  vector<ScoredHyp> s_;
-  unsigned src_len_;
-
-  KBestGetter(const unsigned k, const string filter_type) :
-    k_(k), filter_type_(filter_type) {}
-
-  virtual void
-  NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg)
-  {
-    src_len_ = smeta.GetSourceLength();
-    KBestScored(*hg);
-  }
-
-  vector<ScoredHyp>* GetSamples() { return &s_; }
-
-  void
-  KBestScored(const Hypergraph& forest)
-  {
-    if (filter_type_ == "uniq") {
-      KBestUnique(forest);
-    } else if (filter_type_ == "not") {
-      KBestNoFilter(forest);
-    }
-  }
-
-  void
-  KBestUnique(const Hypergraph& forest)
-  {
-    s_.clear(); sz_ = f_count_ = 0;
-    KBest::KBestDerivations<vector<WordID>, ESentenceTraversal,
-      KBest::FilterUnique, prob_t, EdgeProb> kbest(forest, k_);
-    for (unsigned i = 0; i < k_; ++i) {
-      const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal, KBest::FilterUnique,
-              prob_t, EdgeProb>::Derivation* d =
-            kbest.LazyKthBest(forest.nodes_.size() - 1, i);
-      if (!d) break;
-      ScoredHyp h;
-      h.w = d->yield;
-      h.f = d->feature_values;
-      h.model = log(d->score);
-      h.rank = i;
-      h.score = scorer_->Score(h.w, *ref_, i, src_len_);
-      s_.push_back(h);
-      sz_++;
-      f_count_ += h.f.size();
-    }
-  }
-
-  void
-  KBestNoFilter(const Hypergraph& forest)
-  {
-    s_.clear(); sz_ = f_count_ = 0;
-    KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> kbest(forest, k_);
-    for (unsigned i = 0; i < k_; ++i) {
-      const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal>::Derivation* d =
-            kbest.LazyKthBest(forest.nodes_.size() - 1, i);
-      if (!d) break;
-      ScoredHyp h;
-      h.w = d->yield;
-      h.f = d->feature_values;
-      h.model = log(d->score);
-      h.rank = i;
-      h.score = scorer_->Score(h.w, *ref_, i, src_len_);
-      s_.push_back(h);
-      sz_++;
-      f_count_ += h.f.size();
-    }
-  }
-};
-
-
-} // namespace
-
-#endif
-
diff --git a/dtrain/ksampler.h b/dtrain/ksampler.h
deleted file mode 100644
index bc2f56cd..00000000
--- a/dtrain/ksampler.h
+++ /dev/null
@@ -1,61 +0,0 @@
-#ifndef _DTRAIN_KSAMPLER_H_
-#define _DTRAIN_KSAMPLER_H_
-
-#include "hg_sampler.h" // cdec
-#include "kbestget.h"
-#include "score.h"
-
-namespace dtrain
-{
-
-bool
-cmp_hyp_by_model_d(ScoredHyp a, ScoredHyp b)
-{
-  return a.model > b.model;
-}
-
-struct KSampler : public HypSampler
-{
-  const unsigned k_;
-  vector<ScoredHyp> s_;
-  MT19937* prng_;
-  score_t (*scorer)(NgramCounts&, const unsigned, const unsigned, unsigned, vector<score_t>);
-  unsigned src_len_;
-
-  explicit KSampler(const unsigned k, MT19937* prng) :
-    k_(k), prng_(prng) {}
-
-  virtual void
-  NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg)
-  {
-    src_len_ = smeta.GetSourceLength();
-    ScoredSamples(*hg);
-  }
-
-  vector<ScoredHyp>* GetSamples() { return &s_; }
-
-  void ScoredSamples(const Hypergraph& forest) {
-    s_.clear(); sz_ = f_count_ = 0;
-    std::vector<HypergraphSampler::Hypothesis> samples;
-    HypergraphSampler::sample_hypotheses(forest, k_, prng_, &samples);
-    for (unsigned i = 0; i < k_; ++i) {
-      ScoredHyp h;
-      h.w = samples[i].words;
-      h.f = samples[i].fmap;
-      h.model = log(samples[i].model_score);
-      h.rank = i;
-      h.score = scorer_->Score(h.w, *ref_, i, src_len_);
-      s_.push_back(h);
-      sz_++;
-      f_count_ += h.f.size();
-    }
-    sort(s_.begin(), s_.end(), cmp_hyp_by_model_d);
-    for (unsigned i = 0; i < s_.size(); i++) s_[i].rank = i;
-  }
-};
-
-
-} // namespace
-
-#endif
-
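Both observers expose their output through the same GetSamples() interface. Worth noting in KSampler above: hypotheses sampled from the forest come back in no particular order, so they are re-sorted by model score and their rank fields reassigned, which lets the pair-sampling code below treat a sampled list exactly like a k-best list.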
diff --git a/dtrain/pairsampling.h b/dtrain/pairsampling.h
deleted file mode 100644
index 84be1efb..00000000
--- a/dtrain/pairsampling.h
+++ /dev/null
@@ -1,149 +0,0 @@
-#ifndef _DTRAIN_PAIRSAMPLING_H_
-#define _DTRAIN_PAIRSAMPLING_H_
-
-namespace dtrain
-{
-
-
-bool
-accept_pair(score_t a, score_t b, score_t threshold)
-{
-  if (fabs(a - b) < threshold) return false;
-  return true;
-}
-
-bool
-cmp_hyp_by_score_d(ScoredHyp a, ScoredHyp b)
-{
-  return a.score > b.score;
-}
-
-inline void
-all_pairs(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, float _unused=1)
-{
-  sort(s->begin(), s->end(), cmp_hyp_by_score_d);
-  unsigned sz = s->size();
-  bool b = false;
-  unsigned count = 0;
-  for (unsigned i = 0; i < sz-1; i++) {
-    for (unsigned j = i+1; j < sz; j++) {
-      if (threshold > 0) {
-        if (accept_pair((*s)[i].score, (*s)[j].score, threshold))
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      } else {
-        if ((*s)[i].score != (*s)[j].score)
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      }
-      if (++count == max) {
-        b = true;
-        break;
-      }
-    }
-    if (b) break;
-  }
-}
-
-/*
- * multipartite ranking
- *  sort (descending) by bleu
- *  compare top X to middle Y and low X
- *  cmp middle Y to low X
- */
-
-inline void
-partXYX(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, float hi_lo)
-{
-  unsigned sz = s->size();
-  if (sz < 2) return;
-  sort(s->begin(), s->end(), cmp_hyp_by_score_d);
-  unsigned sep = round(sz*hi_lo);
-  unsigned sep_hi = sep;
-  if (sz > 4) while (sep_hi < sz && (*s)[sep_hi-1].score == (*s)[sep_hi].score) ++sep_hi;
-  else sep_hi = 1;
-  bool b = false;
-  unsigned count = 0;
-  for (unsigned i = 0; i < sep_hi; i++) {
-    for (unsigned j = sep_hi; j < sz; j++) {
-#ifdef DTRAIN_FASTER_PERCEPTRON
-      if ((*s)[i].model <= (*s)[j].model) {
-#endif
-      if (threshold > 0) {
-        if (accept_pair((*s)[i].score, (*s)[j].score, threshold))
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      } else {
-        if ((*s)[i].score != (*s)[j].score)
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      }
-      if (++count == max) {
-        b = true;
-        break;
-      }
-#ifdef DTRAIN_FASTER_PERCEPTRON
-      }
-#endif
-    }
-    if (b) break;
-  }
-  unsigned sep_lo = sz-sep;
-  while (sep_lo > 0 && (*s)[sep_lo-1].score == (*s)[sep_lo].score) --sep_lo;
-  for (unsigned i = sep_hi; i < sz-sep_lo; i++) {
-    for (unsigned j = sz-sep_lo; j < sz; j++) {
-#ifdef DTRAIN_FASTER_PERCEPTRON
-      if ((*s)[i].model <= (*s)[j].model) {
-#endif
-      if (threshold > 0) {
-        if (accept_pair((*s)[i].score, (*s)[j].score, threshold))
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      } else {
-        if ((*s)[i].score != (*s)[j].score)
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      }
-      if (++count == max) return;
-#ifdef DTRAIN_FASTER_PERCEPTRON
-      }
-#endif
-    }
-  }
-}
-
-/*
- * pair sampling as in
- * 'Tuning as Ranking' (Hopkins & May, 2011)
- *     count = 5000
- * threshold = 5% BLEU (0.05 for param 3)
- *       cut = top 50
- */
-bool
-_PRO_cmp_pair_by_diff_d(pair<ScoredHyp,ScoredHyp> a, pair<ScoredHyp,ScoredHyp> b)
-{
-  return (fabs(a.first.score - a.second.score)) > (fabs(b.first.score - b.second.score));
-}
-inline void
-PROsampling(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, float _unused=1)
-{
-  unsigned max_count = 5000, count = 0, sz = s->size();
-  bool b = false;
-  for (unsigned i = 0; i < sz-1; i++) {
-    for (unsigned j = i+1; j < sz; j++) {
-      if (accept_pair((*s)[i].score, (*s)[j].score, threshold)) {
-        training.push_back(make_pair((*s)[i], (*s)[j]));
-        if (++count == max_count) {
-          b = true;
-          break;
-        }
-      }
-    }
-    if (b) break;
-  }
-  if (training.size() > 50) {
-    sort(training.begin(), training.end(), _PRO_cmp_pair_by_diff_d);
-    training.erase(training.begin()+50, training.end());
-  }
-  return;
-}
-
-
-} // namespace
-
-#endif
-
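To make the multipartite scheme above concrete: with k=100 samples and hi_lo=0.1, partXYX sorts the list by BLEU and pairs the top 10 hypotheses against the lower 90, then the middle 80 against the bottom 10; the sep_hi/sep_lo adjustments just avoid splitting runs of equal scores. With DTRAIN_FASTER_PERCEPTRON defined, only pairs the current model already misranks are emitted, which is what justifies the rank_error shortcut in the update loop of dtrain.cc. PROsampling instead follows Hopkins & May (2011): accept up to 5000 pairs whose BLEU difference clears the threshold, then keep the 50 pairs with the largest differences.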
diff --git a/dtrain/score.cc b/dtrain/score.cc
deleted file mode 100644
index 34fc86a9..00000000
--- a/dtrain/score.cc
+++ /dev/null
@@ -1,254 +0,0 @@
-#include "score.h"
-
-namespace dtrain
-{
-
-
-/*
- * bleu
- *
- * as in "BLEU: a Method for Automatic Evaluation
- *        of Machine Translation"
- * (Papineni et al. '02)
- *
- * NOTE: 0 if for one n \in {1..N} count is 0
- */
-score_t
-BleuScorer::Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len)
-{
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  unsigned M = N_;
-  vector<score_t> v = w_;
-  if (ref_len < N_) {
-    M = ref_len;
-    for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
-  }
-  score_t sum = 0;
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) return 0.;
-    sum += v[i] * log((score_t)counts.clipped_[i]/counts.sum_[i]);
-  }
-  return brevity_penalty(hyp_len, ref_len) * exp(sum);
-}
-
-score_t
-BleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
-                  const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  return Bleu(counts, hyp_len, ref_len);
-}
-
-/*
- * 'stupid' bleu
- *
- * as in "ORANGE: a Method for Evaluating
- *        Automatic Evaluation Metrics
- *        for Machine Translation"
- * (Lin & Och '04)
- *
- * NOTE: 0 iff no 1gram match
- */
-score_t
-StupidBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
-                        const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  unsigned M = N_;
-  vector<score_t> v = w_;
-  if (ref_len < N_) {
-    M = ref_len;
-    for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
-  }
-  score_t sum = 0, add = 0;
-  for (unsigned i = 0; i < M; i++) {
-    if (i == 0 && (counts.sum_[i] == 0 || counts.clipped_[i] == 0)) return 0.;
-    if (i == 1) add = 1;
-    sum += v[i] * log(((score_t)counts.clipped_[i] + add)/((counts.sum_[i] + add)));
-  }
-  return  brevity_penalty(hyp_len, ref_len) * exp(sum);
-}
-
-/*
- * smooth bleu
- *
- * as in "An End-to-End Discriminative Approach
- *        to Machine Translation"
- * (Liang et al. '06)
- *
- * NOTE: max is 0.9375 (with N=4)
- */
-score_t
-SmoothBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
-                        const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  unsigned M = N_;
-  if (ref_len < N_) M = ref_len;
-  score_t sum = 0.;
-  vector<score_t> i_bleu;
-  for (unsigned i = 0; i < M; i++) i_bleu.push_back(0.);
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) {
-      break;
-    } else {
-      score_t i_ng = log((score_t)counts.clipped_[i]/counts.sum_[i]);
-      for (unsigned j = i; j < M; j++) {
-        i_bleu[j] += (1/((score_t)j+1)) * i_ng;
-      }
-    }
-    sum += exp(i_bleu[i])/pow(2.0, (double)(N_-i));
-  }
-  return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * 'sum' bleu
- *
- * sum up Ngram precisions
- */
-score_t
-SumBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
-                        const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  unsigned M = N_;
-  if (ref_len < N_) M = ref_len;
-  score_t sum = 0.;
-  unsigned j = 1;
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break;
-    sum += ((score_t)counts.clipped_[i]/counts.sum_[i])/pow(2.0, (double) (N_-j+1));
-    j++;
-  }
-  return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * 'sum' (exp) bleu
- *
- * sum up exp(Ngram precisions)
- */
-score_t
-SumExpBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
-                        const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  unsigned M = N_;
-  if (ref_len < N_) M = ref_len;
-  score_t sum = 0.;
-  unsigned j = 1;
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break;
-    sum += exp(((score_t)counts.clipped_[i]/counts.sum_[i]))/pow(2.0, (double) (N_-j+1));
-    j++;
-  }
-  return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * 'sum' (whatever) bleu
- *
- * sum up exp(weight * log(Ngram precisions))
- */
-score_t
-SumWhateverBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
-                        const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  unsigned M = N_;
-  vector<score_t> v = w_;
-  if (ref_len < N_) {
-    M = ref_len;
-    for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
-  }
-  score_t sum = 0.;
-  unsigned j = 1;
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break;
-    sum += exp(v[i] * log(((score_t)counts.clipped_[i]/counts.sum_[i])))/pow(2.0, (double) (N_-j+1));
-    j++;
-  }
-  return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * approx. bleu
- *
- * as in "Online Large-Margin Training of Syntactic
- *        and Structural Translation Features"
- * (Chiang et al. '08)
- *
- * NOTE: Needs some more code in dtrain.cc .
- *       No scaling by src len.
- */
-score_t
-ApproxBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
-                        const unsigned rank, const unsigned src_len)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (ref_len == 0) return 0.;
-  score_t score = 0.;
-  NgramCounts counts(N_);
-  if (hyp_len > 0) {
-    counts = make_ngram_counts(hyp, ref, N_);
-    NgramCounts tmp = glob_onebest_counts_ + counts;
-    score = Bleu(tmp, hyp_len, ref_len);
-  }
-  if (rank == 0) { // 'context of 1best translations'
-    glob_onebest_counts_ += counts;
-    glob_onebest_counts_ *= discount_;
-    glob_hyp_len_ = discount_ * (glob_hyp_len_ + hyp_len);
-    glob_ref_len_ = discount_ * (glob_ref_len_ + ref_len);
-    glob_src_len_ = discount_ * (glob_src_len_ + src_len);
-  }
-  return score;
-}
-
-/*
- * Linear (Corpus) Bleu
- *
- * as in "Lattice Minimum Bayes-Risk Decoding
- *        for Statistical Machine Translation"
- * (Tromble et al. '08)
- *
- */
-score_t
-LinearBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
-                        const unsigned rank, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (ref_len == 0) return 0.;
-  unsigned M = N_;
-  if (ref_len < N_) M = ref_len;
-  NgramCounts counts(M);
-  if (hyp_len > 0)
-    counts = make_ngram_counts(hyp, ref, M);
-  score_t ret = 0.;
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || onebest_counts_.sum_[i] == 0) break;
-    ret += counts.sum_[i]/onebest_counts_.sum_[i];
-  }
-  ret = -(hyp_len/(score_t)onebest_len_) + (1./M) * ret;
-  if (rank == 0) {
-    onebest_len_ += hyp_len;
-    onebest_counts_ += counts;
-  }
-  return ret;
-}
-
-
-} // namespace
-
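All of the scorers above are sentence-level variants of BLEU (Papineni et al. '02). In the notation of the code, the plain Bleu() computes

```
\mathrm{BLEU} = \mathrm{BP} \cdot \exp\left( \sum_{n=1}^{N} w_n \log p_n \right),
\qquad
\mathrm{BP} =
\begin{cases}
1 & \text{if } |\mathrm{hyp}| > |\mathrm{ref}| \\
\exp\!\left(1 - |\mathrm{ref}|/|\mathrm{hyp}|\right) & \text{otherwise,}
\end{cases}
```

where the modified n-gram precisions are p_n = clipped_n / sum_n (the clipped and total counts kept in NgramCounts below) and BP is the brevity_penalty defined in kbestget.h. Since the product is zero whenever any p_n is zero, which happens constantly for single sentences, the 'stupid', smoothed, and summed variants above trade exactness for a signal that stays nonzero.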
-#ifndef _DTRAIN_SCORE_H_
-#define _DTRAIN_SCORE_H_
-
-#include "kbestget.h"
-
-using namespace std;
-
-namespace dtrain
-{
-
-
-struct NgramCounts
-{
-  unsigned N_;
-  map<unsigned, score_t> clipped_;
-  map<unsigned, score_t> sum_;
-
-  NgramCounts(const unsigned N) : N_(N) { Zero(); }
-
-  inline void
-  operator+=(const NgramCounts& rhs)
-  {
-    if (rhs.N_ > N_) Resize(rhs.N_);
-    for (unsigned i = 0; i < rhs.N_; i++) { // rhs may track fewer orders than *this
-      this->clipped_[i] += rhs.clipped_.find(i)->second;
-      this->sum_[i] += rhs.sum_.find(i)->second;
-    }
-  }
-
-  inline const NgramCounts
-  operator+(const NgramCounts &other) const
-  {
-    NgramCounts result = *this;
-    result += other;
-    return result;
-  }
-
-  inline void
-  operator*=(const score_t rhs)
-  {
-    for (unsigned i = 0; i < N_; i++) {
-      this->clipped_[i] *= rhs;
-      this->sum_[i] *= rhs;
-    }
-  }
-
-  inline void
-  Add(const unsigned count, const unsigned ref_count, const unsigned i)
-  {
-    assert(i < N_);
-    if (count > ref_count) {
-      clipped_[i] += ref_count;
-    } else {
-      clipped_[i] += count;
-    }
-    sum_[i] += count;
-  }
-
-  inline void
-  Zero()
-  {
-    for (unsigned i = 0; i < N_; i++) {
-      clipped_[i] = 0.;
-      sum_[i] = 0.;
-    }
-  }
-
-  inline void
-  One()
-  {
-    for (unsigned i = 0; i < N_; i++) {
-      clipped_[i] = 1.;
-      sum_[i] = 1.;
-    }
-  }
-
-  inline void
-  Print()
-  {
-    for (unsigned i = 0; i < N_; i++) {
-      cout << i+1 << "grams (clipped):\t" << clipped_[i] << endl;
-      cout << i+1 << "grams:\t\t\t" << sum_[i] << endl;
-    }
-  }
-
-  inline void Resize(unsigned N)
-  {
-    if (N == N_) return;
-    else if (N > N_) {
-      for (unsigned i = N_; i < N; i++) {
-        clipped_[i] = 0.;
-        sum_[i] = 0.;
-      }
-    } else { // N < N_
-      for (unsigned i = N_-1; i > N-1; i--) {
-        clipped_.erase(i);
-        sum_.erase(i);
-      }
-    }
-    N_ = N;
-  }
-};
-
-typedef map<vector<WordID>, unsigned> Ngrams;
-
-inline Ngrams
-make_ngrams(const vector<WordID>& s, const unsigned N)
-{
-  Ngrams ngrams;
-  vector<WordID> ng;
-  for (size_t i = 0; i < s.size(); i++) {
-    ng.clear();
-    for (unsigned j = i; j < min(i+N, s.size()); j++) {
-      ng.push_back(s[j]);
-      ngrams[ng]++;
-    }
-  }
-  return ngrams;
-}
-
-inline NgramCounts
-make_ngram_counts(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned N)
-{
-  Ngrams hyp_ngrams = make_ngrams(hyp, N);
-  Ngrams ref_ngrams = make_ngrams(ref, N);
-  NgramCounts counts(N);
-  Ngrams::iterator it;
-  Ngrams::iterator ti;
-  for (it = hyp_ngrams.begin(); it != hyp_ngrams.end(); it++) {
-    ti = ref_ngrams.find(it->first);
-    if (ti != ref_ngrams.end()) {
-      counts.Add(it->second, ti->second, it->first.size() - 1);
-    } else {
-      counts.Add(it->second, 0, it->first.size() - 1);
-    }
-  }
-  return counts;
-}
-
-struct BleuScorer : public LocalScorer
-{
-  score_t Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len);
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct StupidBleuScorer : public LocalScorer
-{
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct SmoothBleuScorer : public LocalScorer
-{
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct SumBleuScorer : public LocalScorer
-{
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct SumExpBleuScorer : public LocalScorer
-{
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct SumWhateverBleuScorer : public LocalScorer
-{
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
-};
-
-struct ApproxBleuScorer : public BleuScorer
-{
-  NgramCounts glob_onebest_counts_;
-  score_t glob_hyp_len_, glob_ref_len_, glob_src_len_; // discounted on update, hence fractional
-  score_t discount_;
-
-  ApproxBleuScorer(unsigned N, score_t d) : glob_onebest_counts_(NgramCounts(N)), discount_(d)
-  {
-    glob_hyp_len_ = glob_ref_len_ = glob_src_len_ = 0.;
-  }
-
-  inline void Reset() {
-    glob_onebest_counts_.Zero();
-    glob_hyp_len_ = glob_ref_len_ = glob_src_len_ = 0.;
-  }
-
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned src_len);
-};
-
-struct LinearBleuScorer : public BleuScorer
-{
-  unsigned onebest_len_;
-  NgramCounts onebest_counts_;
-
-  LinearBleuScorer(unsigned N) : onebest_len_(1), onebest_counts_(N)
-  {
-    onebest_counts_.One();
-  }
-
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned /*src_len*/);
-
-  inline void Reset() {
-    onebest_len_ = 1;
-    onebest_counts_.One();
-  }
-};
-
-
-} // namespace
-
-#endif
-
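The clipping done by NgramCounts::Add() via make_ngram_counts() above is standard BLEU count clipping: a hypothesis n-gram is credited at most as often as it occurs in the reference. A minimal standalone sketch of the same logic (WordID is stubbed as int here; in dtrain it comes from cdec's headers):

```
// Standalone sketch of the clipped n-gram counting in score.h above.
#include <algorithm>
#include <iostream>
#include <map>
#include <vector>
using namespace std;

typedef int WordID; // stub; cdec defines the real WordID
typedef map<vector<WordID>, unsigned> Ngrams;

Ngrams make_ngrams(const vector<WordID>& s, const unsigned N)
{
  Ngrams ngrams;
  vector<WordID> ng;
  for (size_t i = 0; i < s.size(); i++) {
    ng.clear();
    for (size_t j = i; j < min(i+N, s.size()); j++) {
      ng.push_back(s[j]);
      ngrams[ng]++; // count every n-gram starting at i, up to order N
    }
  }
  return ngrams;
}

int main()
{
  // hyp = "a a b", ref = "a b b", with words mapped to ids 1 and 2
  vector<WordID> hyp, ref;
  hyp.push_back(1); hyp.push_back(1); hyp.push_back(2);
  ref.push_back(1); ref.push_back(2); ref.push_back(2);
  Ngrams h = make_ngrams(hyp, 2), r = make_ngrams(ref, 2);
  for (Ngrams::iterator it = h.begin(); it != h.end(); ++it) {
    Ngrams::iterator ti = r.find(it->first);
    unsigned ref_count = (ti == r.end()) ? 0 : ti->second;
    // clipping, as in NgramCounts::Add: credit min(hyp count, ref count)
    cout << it->first.size() << "-gram: count=" << it->second
         << " clipped=" << min(it->second, ref_count) << endl;
  }
  return 0;
}
```

For this toy pair the unigram "a" occurs twice in the hypothesis but only once in the reference, so its clipped count is 1; the bigram "a a" never occurs in the reference, so it clips to 0.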
diff --git a/dtrain/test/example/README b/dtrain/test/example/README
deleted file mode 100644
index 6937b11b..00000000
--- a/dtrain/test/example/README
+++ /dev/null
@@ -1,8 +0,0 @@
-A small example of the input format for distributed training.
-Call dtrain from cdec/dtrain/ with './dtrain -c test/example/dtrain.ini'.
-
-For this to work, make sure 'DTRAIN_LOCAL' is undefined in dtrain.h,
-then recompile.
-
-Data is here: http://simianer.de/#dtrain
-
diff --git a/dtrain/test/example/cdec.ini b/dtrain/test/example/cdec.ini
deleted file mode 100644
index 6642107f..00000000
--- a/dtrain/test/example/cdec.ini
+++ /dev/null
@@ -1,24 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
-scfg_max_span_limit=15
-intersection_strategy=cube_pruning
-cubepruning_pop_limit=30
-feature_function=WordPenalty
-feature_function=KLanguageModel test/example/nc-wmt11.en.srilm.gz
-# all currently working feature functions for translation;
-# the uncommented ones below were used in the ACL paper:
-#feature_function=ArityPenalty
-#feature_function=CMR2008ReorderingFeatures
-#feature_function=Dwarf
-#feature_function=InputIndicator
-#feature_function=LexNullJump
-#feature_function=NewJump
-#feature_function=NgramFeatures
-#feature_function=NonLatinCount
-#feature_function=OutputIndicator
-feature_function=RuleIdentityFeatures
-feature_function=RuleNgramFeatures
-feature_function=RuleShape
-#feature_function=SourceSpanSizeFeatures
-#feature_function=SourceWordPenalty
-#feature_function=SpanFeatures
diff --git a/dtrain/test/example/dtrain.ini b/dtrain/test/example/dtrain.ini
deleted file mode 100644
index c8ac7c3f..00000000
--- a/dtrain/test/example/dtrain.ini
+++ /dev/null
@@ -1,22 +0,0 @@
-input=test/example/nc-wmt11.1k.gz    # use '-' for STDIN
-output=-                             # a weights file (add .gz for gzip compression), or '-' for STDOUT
-select_weights=VOID                  # don't output weights
-decoder_config=test/example/cdec.ini # config for cdec
-# weights for these features will be printed on each iteration
-print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-tmp=/tmp
-stop_after=10           # stop each epoch after 10 inputs
-
-# interesting stuff
-epochs=3                # run over the input 3 times
-k=100                   # use 100-best lists
-N=4                     # optimize (approximate) BLEU4
-scorer=stupid_bleu      # use 'stupid' BLEU+1
-learning_rate=0.0001    # learning rate
-gamma=0                 # SVM-style regularization strength (0 = off)
-sample_from=kbest       # use k-best lists (as opposed to sampling from the forest)
-filter=uniq             # keep only unique entries (by surface form) in the k-best list
-pair_sampling=XYX
-hi_lo=0.1               # pair the top 10% vs. the middle 80%, and the middle 80% vs. the bottom 10%
-pair_threshold=0        # minimum BLEU distance for a pair (even at 0, only pairs with diff > 0 are used)
-loss_margin=0
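The hi_lo comment above is terse. One plausible reading, given as a hypothetical sketch only (the actual sampler lives in dtrain/pairsampling.h): the score-sorted k-best list is split into a top 10%, a middle 80%, and a bottom 10%, and training pairs are drawn top-vs-middle and middle-vs-bottom:

```
// Hypothetical illustration of XYX pair sampling with k=100, hi_lo=0.1;
// this is our reading of the config comment, not dtrain's code.
#include <cstdio>

int main()
{
  const unsigned k = 100;     // size of the (score-sorted) k-best list
  const double hi_lo = 0.1;   // fraction for the top and bottom slices
  const unsigned sep = (unsigned)(k * hi_lo); // 10
  unsigned n_pairs = 0;
  for (unsigned i = 0; i < sep; i++)          // top 10% ...
    for (unsigned j = sep; j < k - sep; j++)  // ... vs middle 80%
      n_pairs++;
  for (unsigned i = sep; i < k - sep; i++)    // middle 80% ...
    for (unsigned j = k - sep; j < k; j++)    // ... vs bottom 10%
      n_pairs++;
  printf("up to %u candidate pairs per sentence\n", n_pairs); // 1600 here
  return 0;
}
```

With pair_threshold=0 only pairs whose BLEU difference is strictly positive survive, which is consistent with the roughly 1100-1200 'avg # pairs' per sentence reported in the expected output below.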
diff --git a/dtrain/test/example/expected-output b/dtrain/test/example/expected-output
deleted file mode 100644
index 25d2c069..00000000
--- a/dtrain/test/example/expected-output
+++ /dev/null
@@ -1,125 +0,0 @@
-                cdec cfg 'test/example/cdec.ini'
-feature: WordPenalty (no config parameters)
-State is 0 bytes for feature WordPenalty
-feature: KLanguageModel (with config parameters 'test/example/nc-wmt11.en.srilm.gz')
-Loading the LM will be faster if you build a binary file.
-Reading test/example/nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
-Loaded 5-gram KLM from test/example/nc-wmt11.en.srilm.gz (MapSize=49581)
-State is 98 bytes for feature KLanguageModel test/example/nc-wmt11.en.srilm.gz
-feature: RuleIdentityFeatures (no config parameters)
-State is 0 bytes for feature RuleIdentityFeatures
-feature: RuleNgramFeatures (no config parameters)
-State is 0 bytes for feature RuleNgramFeatures
-feature: RuleShape (no config parameters)
-  Example feature: Shape_S00000_T00000
-State is 0 bytes for feature RuleShape
-Seeding random number sequence to 1072059181
-
-dtrain
-Parameters:
-                       k 100
-                       N 4
-                       T 3
-                  scorer 'stupid_bleu'
-             sample from 'kbest'
-                  filter 'uniq'
-           learning rate 0.0001
-                   gamma 0
-             loss margin 0
-                   pairs 'XYX'
-                   hi lo 0.1
-          pair threshold 0
-          select weights 'VOID'
-                  l1 reg 0 'none'
-                cdec cfg 'test/example/cdec.ini'
-                   input 'test/example/nc-wmt11.1k.gz'
-                  output '-'
-              stop_after 10
-(a dot represents 10 inputs)
-Iteration #1 of 3.
- . 10
-Stopping after 10 input sentences.
-WEIGHTS
-              Glue = -0.0293
-       WordPenalty = +0.049075
-     LanguageModel = +0.24345
- LanguageModel_OOV = -0.2029
-     PhraseModel_0 = +0.0084102
-     PhraseModel_1 = +0.021729
-     PhraseModel_2 = +0.014922
-     PhraseModel_3 = +0.104
-     PhraseModel_4 = -0.14308
-     PhraseModel_5 = +0.0247
-     PhraseModel_6 = -0.012
-       PassThrough = -0.2161
-        ---
-       1best avg score: 0.16872 (+0.16872)
- 1best avg model score: -1.8276 (-1.8276)
-           avg # pairs: 1121.1
-        avg # rank err: 555.6
-     avg # margin viol: 0
-    non0 feature count: 277
-           avg list sz: 77.2
-           avg f count: 90.96
-(time 0.1 min, 0.6 s/S)
-
-Iteration #2 of 3.
- . 10
-WEIGHTS
-              Glue = -0.3526
-       WordPenalty = +0.067576
-     LanguageModel = +1.155
- LanguageModel_OOV = -0.2728
-     PhraseModel_0 = -0.025529
-     PhraseModel_1 = +0.095869
-     PhraseModel_2 = +0.094567
-     PhraseModel_3 = +0.12482
-     PhraseModel_4 = -0.36533
-     PhraseModel_5 = +0.1068
-     PhraseModel_6 = -0.1517
-       PassThrough = -0.286
-        ---
-       1best avg score: 0.18394 (+0.015221)
- 1best avg model score: 3.205 (+5.0326)
-           avg # pairs: 1168.3
-        avg # rank err: 594.8
-     avg # margin viol: 0
-    non0 feature count: 543
-           avg list sz: 77.5
-           avg f count: 85.916
-(time 0.083 min, 0.5 s/S)
-
-Iteration #3 of 3.
- . 10
-WEIGHTS
-              Glue = -0.392
-       WordPenalty = +0.071963
-     LanguageModel = +0.81266
- LanguageModel_OOV = -0.4177
-     PhraseModel_0 = -0.2649
-     PhraseModel_1 = -0.17931
-     PhraseModel_2 = +0.038261
-     PhraseModel_3 = +0.20261
-     PhraseModel_4 = -0.42621
-     PhraseModel_5 = +0.3198
-     PhraseModel_6 = -0.1437
-       PassThrough = -0.4309
-        ---
-       1best avg score: 0.2962 (+0.11225)
- 1best avg model score: -36.274 (-39.479)
-           avg # pairs: 1109.6
-        avg # rank err: 515.9
-     avg # margin viol: 0
-    non0 feature count: 741
-           avg list sz: 77
-           avg f count: 88.982
-(time 0.083 min, 0.5 s/S)
-
-Writing weights file to '-' ...
-done
-
----
-Best iteration: 3 [SCORE 'stupid_bleu'=0.2962].
-This took 0.26667 min.
diff --git a/dtrain/test/toy/cdec.ini b/dtrain/test/toy/cdec.ini
deleted file mode 100644
index 98b02d44..00000000
--- a/dtrain/test/toy/cdec.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
diff --git a/dtrain/test/toy/dtrain.ini b/dtrain/test/toy/dtrain.ini
deleted file mode 100644
index a091732f..00000000
--- a/dtrain/test/toy/dtrain.ini
+++ /dev/null
@@ -1,12 +0,0 @@
-decoder_config=test/toy/cdec.ini
-input=test/toy/input
-output=-
-print_weights=logp shell_rule house_rule small_rule little_rule PassThrough
-k=4
-N=4
-epochs=2
-scorer=bleu
-sample_from=kbest
-filter=uniq
-pair_sampling=all
-learning_rate=1
diff --git a/dtrain/test/toy/input b/dtrain/test/toy/input
deleted file mode 100644
index 4d10a9ea..00000000
--- a/dtrain/test/toy/input
+++ /dev/null
@@ -1,2 +0,0 @@
-0	ich sah ein kleines haus	i saw a little house	[S] ||| [NP,1] [VP,2] ||| [1] [2] ||| logp=0	[NP] ||| ich ||| i ||| logp=0	[NP] ||| ein [NN,1] ||| a [1] ||| logp=0	[NN] ||| [JJ,1] haus ||| [1] house ||| logp=0 house_rule=1	[NN] ||| [JJ,1] haus ||| [1] shell ||| logp=0 shell_rule=1	[JJ] ||| kleines ||| small ||| logp=0 small_rule=1	[JJ] ||| kleines ||| little ||| logp=0 little_rule=1	[JJ] ||| grosses ||| big ||| logp=0	[JJ] ||| grosses ||| large ||| logp=0	[VP] ||| [V,1] [NP,2] ||| [1] [2] ||| logp=0	[V] ||| sah ||| saw ||| logp=0	[V] ||| fand ||| found ||| logp=0
-1	ich fand ein kleines haus	i found a little house	[S] ||| [NP,1] [VP,2] ||| [1] [2] ||| logp=0	[NP] ||| ich ||| i ||| logp=0	[NP] ||| ein [NN,1] ||| a [1] ||| logp=0	[NN] ||| [JJ,1] haus ||| [1] house ||| logp=0 house_rule=1	[NN] ||| [JJ,1] haus ||| [1] shell ||| logp=0 shell_rule=1	[JJ] ||| kleines ||| small ||| logp=0 small_rule=1	[JJ] ||| kleines ||| little ||| logp=0 little_rule=1	[JJ] ||| grosses ||| big ||| logp=0	[JJ] ||| grosses ||| large ||| logp=0	[VP] ||| [V,1] [NP,2] ||| [1] [2] ||| logp=0	[V] ||| sah ||| saw ||| logp=0	[V] ||| fand ||| found ||| logp=0
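Each line of test/toy/input packs a sentence id, the source, the reference, and the per-sentence grammar rules into one tab-separated record. A minimal sketch of splitting such a line (the field handling is ours, not dtrain's actual reader):

```
// Minimal sketch: split a dtrain input line (cf. test/toy/input) on tabs.
// Field layout: id \t source \t reference \t rule_1 \t ... \t rule_n
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
using namespace std;

int main()
{
  string line;
  while (getline(cin, line)) {
    vector<string> fields;
    stringstream ss(line);
    string f;
    while (getline(ss, f, '\t')) fields.push_back(f);
    if (fields.size() < 4) continue; // need id, source, ref, >=1 rule
    cout << "id=" << fields[0] << " src='" << fields[1] << "'"
         << " ref='" << fields[2] << "'"
         << " rules=" << fields.size() - 3 << endl;
  }
  return 0;
}
```

Run against the two toy lines above, this reports 12 rules per sentence; each rule is an ordinary cdec SCFG rule such as [JJ] ||| kleines ||| little ||| logp=0 little_rule=1.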
