Diffstat (limited to 'training')
46 files changed, 933 insertions, 1577 deletions
diff --git a/training/dtrain/Makefile.am b/training/dtrain/Makefile.am
index 7717ec86..aadd376d 100644
--- a/training/dtrain/Makefile.am
+++ b/training/dtrain/Makefile.am
@@ -1,6 +1,6 @@
 bin_PROGRAMS = dtrain
-dtrain_SOURCES = dtrain.cc dtrain.h sample.h pairs.h score.h
+dtrain_SOURCES = dtrain.cc dtrain.h sample.h update.h score.h
 dtrain_LDADD   = ../../decoder/libcdec.a ../../klm/search/libksearch.a ../../mteval/libmteval.a ../../utils/libutils.a ../../klm/lm/libklm.a ../../klm/util/libklm_util.a ../../klm/util/double-conversion/libklm_util_double.a
 AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 69630206..1b7047b0 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -1,541 +1,207 @@
 #include "dtrain.h"
 #include "score.h"
 #include "sample.h"
-#include "pairs.h"
+#include "update.h"
 
 using namespace dtrain;
 
-
-bool
-dtrain_init(int argc, char** argv, po::variables_map* conf)
-{
-  po::options_description ini("Configuration File Options");
-  ini.add_options()
-    ("bitext,b",          po::value<string>(),                                            "bitext: 'src ||| tgt ||| tgt ||| ...'")
-    ("output,o",          po::value<string>()->default_value("-"),                          "output weights file, '-' for STDOUT")
-    ("input_weights,w",   po::value<string>(),                                "input weights file (e.g. from previous iteration)")
-    ("decoder_config,d",  po::value<string>(),                                                      "configuration file for cdec")
-    ("print_weights",     po::value<string>(),                                               "weights to print on each iteration")
-    ("stop_after",        po::value<unsigned>()->default_value(0),                                 "stop after X input sentences")
-    ("keep",              po::value<bool>()->zero_tokens(),                               "keep weights files for each iteration")
-    ("epochs",            po::value<unsigned>()->default_value(10),                               "# of iterations T (per shard)")
-    ("k",                 po::value<unsigned>()->default_value(100),                            "how many translations to sample")
-    ("filter",            po::value<string>()->default_value("uniq"),                          "filter kbest list: 'not', 'uniq'")
-    ("hi_lo",             po::value<float>()->default_value(0.1),                   "hi and lo (X) for XYX (default 0.1), <= 0.5")
-    ("N",                 po::value<unsigned>()->default_value(4),                                          "N for Ngrams (BLEU)")
-    ("scorer",            po::value<string>()->default_value("stupid_bleu"),      "scoring: bleu, stupid_, smooth_, approx_, lc_")
-    ("learning_rate",     po::value<weight_t>()->default_value(1.0),                                              "learning rate")
-    ("gamma",             po::value<weight_t>()->default_value(0.),                            "gamma for SVM (0 for perceptron)")
-    ("select_weights",    po::value<string>()->default_value("last"),     "output best, last, avg weights ('VOID' to throw away)")
-    ("rescale",           po::value<bool>()->zero_tokens(),                     "(re)scale data and weight vector to unit length")
-    ("l1_reg",            po::value<string>()->default_value("none"),      "apply l1 regularization with clipping as in 'Tsuroka et al' (2010)")
-    ("l1_reg_strength",   po::value<weight_t>(),                                                     "l1 regularization strength")
-    ("fselect",           po::value<weight_t>()->default_value(-1), "select top x percent (or by threshold) of features after each epoch NOT IMPLEMENTED") // TODO
-    ("loss_margin",       po::value<weight_t>()->default_value(0.),  "update if no error in pref pair but model scores this near")
-    ("pclr",              po::value<string>()->default_value("no"),         "use a (simple|adagrad) per-coordinate learning rate")
-    ("batch",             po::value<bool>()->zero_tokens(),                                               "do batch optimization")
-    ("repeat",            po::value<unsigned>()->default_value(1),     "repeat optimization over kbest list this number of times")
-    ("output_ranking",    po::value<string>()->default_value(""),                                   "output scored kbests to dir")
-    ("noup",              po::value<bool>()->zero_tokens(),                                                     "dont't optimize");
-  po::options_description cl("Command Line Options");
-  cl.add_options()
-    ("config,c",         po::value<string>(),              "dtrain config file")
-    ("quiet,q",          po::value<bool>()->zero_tokens(),           "be quiet")
-    ("verbose,v",        po::value<bool>()->zero_tokens(),         "be verbose");
-  cl.add(ini);
-  po::store(parse_command_line(argc, argv, cl), *conf);
-  if (conf->count("config")) {
-    ifstream ini_f((*conf)["config"].as<string>().c_str());
-    po::store(po::parse_config_file(ini_f, ini), *conf);
-  }
-  po::notify(*conf);
-  if (!conf->count("decoder_config")) {
-    cerr << cl << endl;
-    return false;
-  }
-  if ((*conf)["hi_lo"].as<float>() > 0.5 || (*conf)["hi_lo"].as<float>() < 0.01) {
-    cerr << "hi_lo must lie in [0.01, 0.5]" << endl;
-    return false;
-  }
-  if (!conf->count("bitext")) {
-    cerr << "No training data given." << endl;
-    return false;
-  }
-  if ((*conf)["select_weights"].as<string>() != "last" && (*conf)["select_weights"].as<string>() != "best" &&
-        (*conf)["select_weights"].as<string>() != "avg" && (*conf)["select_weights"].as<string>() != "VOID") {
-    cerr << "Wrong 'select_weights' param: '" << (*conf)["select_weights"].as<string>() << "', use 'last' or 'best'." << endl;
-    return false;
-  }
-  return true;
-}
-
 int
 main(int argc, char** argv)
 {
-  // handle most parameters
+  // get configuration
   po::variables_map conf;
-  if (!dtrain_init(argc, argv, &conf)) exit(1); // something is wrong
-
-  bool quiet = false;
-  if (conf.count("quiet")) quiet = true;
-  bool verbose = false;
-  if (conf.count("verbose")) verbose = true;
-  bool noup = false;
-  if (conf.count("noup")) noup = true;
-  bool keep = false;
-  if (conf.count("keep")) keep = true;
-  bool rescale = false;
-  if (conf.count("rescale")) rescale = true;
-
-  const unsigned k = conf["k"].as<unsigned>();
-  const unsigned N = conf["N"].as<unsigned>();
-  const unsigned T = conf["epochs"].as<unsigned>();
-  const unsigned stop_after = conf["stop_after"].as<unsigned>();
-  const string select_weights = conf["select_weights"].as<string>();
-  const string output_ranking = conf["output_ranking"].as<string>();
-  const float hi_lo = conf["hi_lo"].as<float>();
-  int repeat = conf["repeat"].as<unsigned>();
-  weight_t loss_margin = conf["loss_margin"].as<weight_t>();
-  bool batch = false;
-  if (conf.count("batch")) batch = true;
-  if (loss_margin > 9998.) loss_margin = std::numeric_limits<float>::max();
-  const string pclr = conf["pclr"].as<string>();
-  bool average = false;
-  if (select_weights == "avg")
-    average = true;
+  if (!dtrain_init(argc, argv, &conf))
+    exit(1); // something is wrong
+  const size_t k              = conf["k"].as<size_t>();
+  const size_t N              = conf["N"].as<size_t>();
+  const size_t T              = conf["iterations"].as<size_t>();
+  const weight_t eta          = conf["learning_rate"].as<weight_t>();
+  const weight_t error_margin = conf["error_margin"].as<weight_t>();
+  const bool average          = conf["average"].as<bool>();
+  const bool keep             = conf["keep"].as<bool>();
+  const weight_t l1_reg       = conf["l1_reg"].as<weight_t>();
+  const string output_fn      = conf["output"].as<string>();
   vector<string> print_weights;
-  if (conf.count("print_weights"))
-    boost::split(print_weights, conf["print_weights"].as<string>(), boost::is_any_of(" "));
+  boost::split(print_weights, conf["print_weights"].as<string>(), boost::is_any_of(" "));
 
   // setup decoder
   register_feature_functions();
   SetSilent(true);
-  ReadFile ini_rf(conf["decoder_config"].as<string>());
-  if (!quiet)
-    cerr << setw(25) << "cdec conf " << "'" << conf["decoder_config"].as<string>() << "'" << endl;
-  Decoder decoder(ini_rf.stream());
+  ReadFile f(conf["decoder_config"].as<string>());
+  Decoder decoder(f.stream());
 
   // setup decoder observer
   ScoredKbest* observer = new ScoredKbest(k, new PerSentenceBleuScorer(N));
 
-  // init weights
+  // weights
   vector<weight_t>& decoder_weights = decoder.CurrentWeightVector();
-
-  SparseVector<weight_t> lambdas, cumulative_penalties, w_average, fixed;
-  if (conf.count("input_weights"))
+  SparseVector<weight_t> lambdas, w_average;
+  if (conf.count("input_weights")) {
     Weights::InitFromFile(conf["input_weights"].as<string>(), &decoder_weights);
-  Weights::InitSparseVector(decoder_weights, &lambdas);
-
-  // meta params for perceptron, SVM
-  weight_t eta = conf["learning_rate"].as<weight_t>();
-  weight_t gamma = conf["gamma"].as<weight_t>();
-
-  // faster perceptron: consider only misranked pairs, see
-  bool faster_perceptron = false;
-  if (gamma==0 && loss_margin==0) faster_perceptron = true;
-
-  // l1 regularization
-  bool l1naive = false;
-  bool l1clip = false;
-  bool l1cumul = false;
-  weight_t l1_reg = 0;
-  if (conf["l1_reg"].as<string>() != "none") {
-    string s = conf["l1_reg"].as<string>();
-    if (s == "naive") l1naive = true;
-    else if (s == "clip") l1clip = true;
-    else if (s == "cumul") l1cumul = true;
-    l1_reg = conf["l1_reg_strength"].as<weight_t>();
+    Weights::InitSparseVector(decoder_weights, &lambdas);
   }
 
-  // output
-  string output_fn = conf["output"].as<string>();
   // input
-  string input_fn;
-  ReadFile input(conf["bitext"].as<string>());
-  // buffer input for t > 0
-  vector<string> src_str_buf;          // source strings (decoder takes only strings)
-  vector<vector<vector<WordID> > > refs_as_ids_buf; // references as WordID vecs
-
-  unsigned in_sz = std::numeric_limits<unsigned>::max(); // input index, input size
-  vector<pair<score_t, score_t> > all_scores;
-  score_t max_score = 0.;
-  unsigned best_it = 0;
-  float overall_time = 0.;
-
-  // output conf
-  if (!quiet) {
-    cerr << _p5;
-    cerr << endl << "dtrain" << endl << "Parameters:" << endl;
-    cerr << setw(25) << "k " << k << endl;
-    cerr << setw(25) << "N " << N << endl;
-    cerr << setw(25) << "T " << T << endl;
-    cerr << setw(25) << "batch " << batch << endl;
-    cerr << setw(25) << "learning rate " << eta << endl;
-    cerr << setw(25) << "gamma " << gamma << endl;
-    cerr << setw(25) << "loss margin " << loss_margin << endl;
-    cerr << setw(25) << "faster perceptron " << faster_perceptron << endl;
-    cerr << setw(25) << "hi lo " << hi_lo << endl;
-    cerr << setw(25) << "select weights " << "'" << select_weights << "'" << endl;
-    if (conf.count("l1_reg"))
-      cerr << setw(25) << "l1 reg " << l1_reg << " '" << conf["l1_reg"].as<string>() << "'" << endl;
-    if (rescale)
-      cerr << setw(25) << "rescale " << rescale << endl;
-    cerr << setw(25) << "pclr " << pclr << endl;
-    cerr << setw(25) << "repeat " << repeat << endl;
-    cerr << setw(25) << "cdec conf " << "'" << conf["decoder_config"].as<string>() << "'" << endl;
-    cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
-    cerr << setw(25) << "output " << "'" << output_fn << "'" << endl;
-    if (conf.count("input_weights"))
-      cerr << setw(25) << "weights in " << "'" << conf["input_weights"].as<string>() << "'" << endl;
-    if (stop_after > 0)
-      cerr << setw(25) << "stop_after " << stop_after << endl;
-    if (!verbose) cerr << "(a dot represents " << DTRAIN_DOTS << " inputs)" << endl;
-  }
+  string input_fn = conf["bitext"].as<string>();
+  ReadFile input(input_fn);
+  vector<string> buf;               // source strings (decoder takes only strings)
+  vector<vector<Ngrams> > buf_ngs;  // compute ngrams and lengths of references
+  vector<vector<size_t> > buf_ls;   // just once
+  size_t input_sz = 0;
+
+  // output configuration
+  cerr << _p5 << "dtrain" << endl << "Parameters:" << endl;
+  cerr << setw(25) << "k " << k << endl;
+  cerr << setw(25) << "N " << N << endl;
+  cerr << setw(25) << "T " << T << endl;
+  cerr << setw(25) << "learning rate " << eta << endl;
+  cerr << setw(25) << "error margin " << error_margin << endl;
+  cerr << setw(25) << "l1 reg " << l1_reg << endl;
+  cerr << setw(25) << "decoder conf " << "'" << conf["decoder_config"].as<string>() << "'" << endl;
+  cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
+  cerr << setw(25) << "output " << "'" << output_fn << "'" << endl;
+  if (conf.count("input_weights"))
+    cerr << setw(25) << "weights in " << "'" << conf["input_weights"].as<string>() << "'" << endl;
+  cerr << "(a dot per input)" << endl;
 
-  // pclr
-  SparseVector<weight_t> learning_rates;
-  // batch
-  SparseVector<weight_t> batch_updates;
-  score_t batch_loss;
+  // meta
+  weight_t best=0., gold_prev=0.;
+  size_t best_iteration = 0;
+  time_t total_time = 0.;
 
-  for (unsigned t = 0; t < T; t++) // T epochs
+  for (size_t t = 0; t < T; t++) // T iterations
   {
 
   time_t start, end;
   time(&start);
-  score_t score_sum = 0.;
-  score_t model_sum(0);
-  unsigned ii = 0, rank_errors = 0, margin_violations = 0, npairs = 0, f_count = 0, list_sz = 0, kbest_loss_improve = 0;
-  batch_loss = 0.;
-  if (!quiet) cerr << "Iteration #" << t+1 << " of " << T << "." << endl;
+  weight_t gold_sum=0., model_sum=0.;
+  size_t i = 0, num_pairs = 0, feature_count = 0, list_sz = 0;
+
+  cerr << "Iteration #" << t+1 << " of " << T << "." << endl;
 
   while(true)
   {
+    bool next = true;
 
-    string in;
-    vector<string> refs;
-    bool next = false, stop = false; // next iteration or premature stop
+    // getting input
     if (t == 0) {
-      if(!getline(*input, in)) next = true;
-        boost::algorithm::split_regex(refs, in, boost::regex(" \\|\\|\\| "));
-        in = refs[0];
-        refs.erase(refs.begin());
-    } else {
-      if (ii == in_sz) next = true; // stop if we reach the end of our input
-    }
-    // stop after X sentences (but still go on for those)
-    if (stop_after > 0 && stop_after == ii && !next) stop = true;
-
-    // produce some pretty output
-    if (!quiet && !verbose) {
-      if (ii == 0) cerr << " ";
-      if ((ii+1) % (DTRAIN_DOTS) == 0) {
-        cerr << ".";
-        cerr.flush();
-      }
-      if ((ii+1) % (20*DTRAIN_DOTS) == 0) {
-        cerr << " " << ii+1 << endl;
-        if (!next && !stop) cerr << " ";
-      }
-      if (stop) {
-        if (ii % (20*DTRAIN_DOTS) != 0) cerr << " " << ii << endl;
-        cerr << "Stopping after " << stop_after << " input sentences." << endl;
+      string in;
+      if(!getline(*input, in)) {
+        next = false;
       } else {
-        if (next) {
-          if (ii % (20*DTRAIN_DOTS) != 0) cerr << " " << ii << endl;
+        vector<string> parts;
+        boost::algorithm::split_regex(parts, in, boost::regex(" \\|\\|\\| "));
+        buf.push_back(parts[0]);
+        parts.erase(parts.begin());
+        buf_ngs.push_back({});
+        buf_ls.push_back({});
+        for (auto s: parts) {
+          vector<WordID> r;
+          vector<string> tok;
+          boost::split(tok, s, boost::is_any_of(" "));
+          RegisterAndConvert(tok, r);
+          buf_ngs.back().emplace_back(MakeNgrams(r, N));
+          buf_ls.back().push_back(r.size());
         }
       }
+    } else {
+      next = i<input_sz;
     }
 
-    // next iteration
-    if (next || stop) break;
-
-    // weights
-    lambdas.init_vector(&decoder_weights);
-
-    // getting input
-    if (t == 0) {
-      vector<vector<WordID> > cur_refs;
-      for (auto r: refs) {
-        vector<WordID> cur_ref;
-        vector<string> tok;
-        boost::split(tok, r, boost::is_any_of(" "));
-        RegisterAndConvert(tok, cur_ref);
-        cur_refs.push_back(cur_ref);
-      }
-      refs_as_ids_buf.push_back(cur_refs);
-      src_str_buf.push_back(in);
-    }
-    observer->SetReference(refs_as_ids_buf[ii]);
-    if (t == 0)
-      decoder.Decode(in, observer);
-    else
-      decoder.Decode(src_str_buf[ii], observer);
-
-    // get (scored) samples
+    // produce some pretty output
+    if (i == 0 || (i+1)%20==0)
+      cerr << " ";
+    cerr << ".";
+    cerr.flush();
+    if (!next)
+      if (i%20 != 0) cerr << " " << i << endl;
+
+    // stop iterating
+    if (!next) break;
+
+    // decode
+    if (t > 0 || i > 0)
+      lambdas.init_vector(&decoder_weights);
+    observer->SetReference(buf_ngs[i], buf_ls[i]);
+    decoder.Decode(buf[i], observer);
     vector<ScoredHyp>* samples = observer->GetSamples();
 
-    if (output_ranking != "") {
-      WriteFile of(output_ranking+"/"+to_string(t)+"."+to_string(ii)+".list"); // works with '-'
-      stringstream ss;
-      for (auto s: *samples) {
-        ss << ii << " ||| ";
-        PrintWordIDVec(s.w, ss);
-        ss << " ||| " << s.model << " ||| " << s.score << endl;
-      }
-      of.get() << ss.str();
-    }
-
-    if (verbose) {
-      cerr << "--- refs for " << ii << ": ";
-      for (auto r: refs_as_ids_buf[ii]) {
-        PrintWordIDVec(r);
-        cerr << endl;
-      }
-      for (unsigned u = 0; u < samples->size(); u++) {
-        cerr << _p2 << _np << "[" << u << ". '";
-        PrintWordIDVec((*samples)[u].w);
-        cerr << "'" << endl;
-        cerr << "SCORE=" << (*samples)[u].score << ",model="<< (*samples)[u].model << endl;
-        cerr << "F{" << (*samples)[u].f << "} ]" << endl << endl;
-      }
-    }
-
-    if (repeat == 1) {
-      score_sum += (*samples)[0].score; // stats for 1best
-      model_sum += (*samples)[0].model;
-    }
-
-    f_count += observer->GetFeatureCount();
+    // stats for 1best
+    gold_sum += samples->front().gold;
+    model_sum += samples->front().model;
+    feature_count += observer->GetFeatureCount();
     list_sz += observer->GetSize();
 
-    // weight updates
-    if (!noup) {
-      // get pairs
-      vector<pair<ScoredHyp,ScoredHyp> > pairs;
-      MakePairs(samples, pairs, faster_perceptron, hi_lo);
-      int cur_npairs = pairs.size();
-      npairs += cur_npairs;
-
-      score_t kbest_loss_first = 0.0, kbest_loss_last = 0.0;
-
-      for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
-           it != pairs.end(); it++) {
-        if (rescale) {
-          it->first.f /= it->first.f.l2norm();
-          it->second.f /= it->second.f.l2norm();
-        }
-        score_t model_diff = it->first.model - it->second.model;
-        score_t loss = max(0.0, -1.0 * model_diff);
-        kbest_loss_first += loss;
-      }
-
-      score_t kbest_loss = 0.0;
-      for (int ki=0; ki < repeat; ki++) {
-
-      SparseVector<weight_t> lambdas_copy; // for l1 regularization
-      SparseVector<weight_t> sum_up; // for pclr
-      if (l1naive||l1clip||l1cumul) lambdas_copy = lambdas;
-
-      for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
-           it != pairs.end(); it++) {
-        score_t model_diff = it->first.model - it->second.model;
-        score_t loss = max(0.0, -1.0 * model_diff);
-
-        if (repeat > 1) {
-          model_diff = lambdas.dot(it->first.f) - lambdas.dot(it->second.f);
-          kbest_loss += loss;
-        }
-        bool rank_error = false;
-        score_t margin;
-        if (faster_perceptron) { // we only have considering misranked pairs
-          rank_error = true; // pair sampling already did this for us
-          margin = std::numeric_limits<float>::max();
-        } else {
-          rank_error = model_diff<=0.0;
-          margin = fabs(model_diff);
-          if (!rank_error && margin < loss_margin) margin_violations++;
-        }
-        if (rank_error && ki==0) rank_errors++;
-        if (rank_error || margin < loss_margin) {
-          SparseVector<weight_t> diff_vec = it->first.f - it->second.f;
-          if (batch) {
-            batch_loss += max(0., -1.0 * model_diff);
-            batch_updates += diff_vec;
-            continue;
-          }
-          if (pclr != "no") {
-            sum_up += diff_vec;
+    // get pairs and update
+    vector<pair<ScoredHyp,ScoredHyp> > pairs;
+    SparseVector<weight_t> updates;
+    num_pairs += CollectUpdates(samples, updates, error_margin);
+    SparseVector<weight_t> lambdas_copy;
+    if (l1_reg)
+      lambdas_copy = lambdas;
+    lambdas.plus_eq_v_times_s(updates, eta);
+
+    // l1 regularization
+    // NB: regularization is done after each sentence,
+    //     not after every single pair!
+    if (l1_reg) {
+      SparseVector<weight_t>::iterator it = lambdas.begin();
+      for (; it != lambdas.end(); ++it) {
+        if (it->second == 0) continue;
+        if (!lambdas_copy.get(it->first)                // new or..
+            || lambdas_copy.get(it->first)!=it->second) // updated feature
+        {
+          weight_t v = it->second;
+          if (v > 0) {
+            it->second = max(0., v - l1_reg);
           } else {
-            lambdas.plus_eq_v_times_s(diff_vec, eta);
-            if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./cur_npairs));
-          }
-        }
-      }
-
-      // per-coordinate learning rate
-      if (pclr != "no") {
-        SparseVector<weight_t>::iterator it = sum_up.begin();
-        for (; it != sum_up.end(); ++it) {
-          if (pclr == "simple") {
-           lambdas[it->first] += it->second / max(1.0, learning_rates[it->first]);
-           learning_rates[it->first]++;
-          } else if (pclr == "adagrad") {
-            if (learning_rates[it->first] == 0) {
-             lambdas[it->first] +=  it->second * eta;
-            } else {
-             lambdas[it->first] +=  it->second * eta * learning_rates[it->first];
-            }
-            learning_rates[it->first] += pow(it->second, 2.0);
+            it->second = min(0., v + l1_reg);
           }
         }
       }
+    }
 
-      // l1 regularization
-      // please note that this regularizations happen
-      // after a _sentence_ -- not after each example/pair!
-      if (l1naive) {
-        SparseVector<weight_t>::iterator it = lambdas.begin();
-        for (; it != lambdas.end(); ++it) {
-          if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
-              it->second *= max(0.0000001, eta/(eta+learning_rates[it->first])); // FIXME
-              learning_rates[it->first]++;
-            it->second -= sign(it->second) * l1_reg;
-          }
-        }
-      } else if (l1clip) {
-        SparseVector<weight_t>::iterator it = lambdas.begin();
-        for (; it != lambdas.end(); ++it) {
-          if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
-            if (it->second != 0) {
-              weight_t v = it->second;
-              if (v > 0) {
-                it->second = max(0., v - l1_reg);
-              } else {
-                it->second = min(0., v + l1_reg);
-              }
-            }
-          }
-        }
-      } else if (l1cumul) {
-        weight_t acc_penalty = (ii+1) * l1_reg; // ii is the index of the current input
-        SparseVector<weight_t>::iterator it = lambdas.begin();
-        for (; it != lambdas.end(); ++it) {
-          if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
-            if (it->second != 0) {
-              weight_t v = it->second;
-              weight_t penalized = 0.;
-              if (v > 0) {
-                penalized = max(0., v-(acc_penalty + cumulative_penalties.get(it->first)));
-              } else {
-                penalized = min(0., v+(acc_penalty - cumulative_penalties.get(it->first)));
-              }
-              it->second = penalized;
-              cumulative_penalties.set_value(it->first, cumulative_penalties.get(it->first)+penalized);
-            }
-          }
-        }
-      }
-
-      if (ki==repeat-1) { // done
-        kbest_loss_last = kbest_loss;
-        if (repeat > 1) {
-          score_t best_model = -std::numeric_limits<score_t>::max();
-          unsigned best_idx = 0;
-          for (unsigned i=0; i < samples->size(); i++) {
-            score_t s = lambdas.dot((*samples)[i].f);
-            if (s > best_model) {
-              best_idx = i;
-              best_model = s;
-            }
-          }
-          score_sum += (*samples)[best_idx].score;
-          model_sum += best_model;
-        }
-      }
-    } // repeat
-
-    if ((kbest_loss_first - kbest_loss_last) >= 0) kbest_loss_improve++;
-
-    } // noup
-
-    if (rescale) lambdas /= lambdas.l2norm();
-
-    ++ii;
+    i++;
 
   } // input loop
 
-  if (t == 0) in_sz = ii; // remember size of input (# lines)
-
-  if (batch) {
-    lambdas.plus_eq_v_times_s(batch_updates, eta);
-    if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs));
-    batch_updates.clear();
-  }
-
-  if (average) w_average += lambdas;
-
-  // print some stats
-  score_t score_avg = score_sum/(score_t)in_sz;
-  score_t model_avg = model_sum/(score_t)in_sz;
-  score_t score_diff, model_diff;
-  if (t > 0) {
-    score_diff = score_avg - all_scores[t-1].first;
-    model_diff = model_avg - all_scores[t-1].second;
-  } else {
-    score_diff = score_avg;
-    model_diff = model_avg;
+  if (t == 0)
+    input_sz = i; // remember size of input (# lines)
+
+  // update average
+  if (average)
+    w_average += lambdas;
+
+  // stats
+  weight_t gold_avg = gold_sum/(weight_t)input_sz;
+  size_t non_zero = (size_t)lambdas.num_nonzero();
+  cerr << _p5 << _p << "WEIGHTS" << endl;
+  for (auto name: print_weights)
+    cerr << setw(18) << name << " = " << lambdas.get(FD::Convert(name)) << endl;
+  cerr << "        ---" << endl;
+  cerr << _np << "       1best avg score: " << gold_avg;
+  cerr << _p << " (" << gold_avg-gold_prev << ")" << endl;
+  cerr << _np << " 1best avg model score: " << model_sum/(weight_t)input_sz << endl;
+  cerr << "           avg # pairs: ";
+  cerr << _np << num_pairs/(float)input_sz << endl;
+  cerr << "   non-0 feature count: " <<  non_zero << endl;
+  cerr << "           avg list sz: " << list_sz/(float)input_sz << endl;
+  cerr << "           avg f count: " << feature_count/(float)list_sz << endl;
+
+  if (gold_avg > best) {
+    best = gold_avg;
+    best_iteration = t;
   }
+  gold_prev = gold_avg;
 
-  unsigned nonz = 0;
-  if (!quiet) nonz = (unsigned)lambdas.num_nonzero();
-
-  if (!quiet) {
-    cerr << _p5 << _p << "WEIGHTS" << endl;
-    for (vector<string>::iterator it = print_weights.begin(); it != print_weights.end(); it++) {
-      cerr << setw(18) << *it << " = " << lambdas.get(FD::Convert(*it)) << endl;
-    }
-    cerr << "        ---" << endl;
-    cerr << _np << "       1best avg score: " << score_avg;
-    cerr << _p << " (" << score_diff << ")" << endl;
-    cerr << _np << " 1best avg model score: " << model_avg;
-    cerr << _p << " (" << model_diff << ")" << endl;
-    cerr << "           avg # pairs: ";
-    cerr << _np << npairs/(float)in_sz << endl;
-    cerr << "        avg # rank err: ";
-    cerr << rank_errors/(float)in_sz;
-    if (faster_perceptron) cerr << " (meaningless)";
-    cerr << endl;
-    cerr << "     avg # margin viol: ";
-    cerr << margin_violations/(float)in_sz << endl;
-    if (batch) cerr << "            batch loss: " << batch_loss << endl;
-    cerr << "       k-best loss imp: " << ((float)kbest_loss_improve/in_sz)*100 << "%" << endl;
-    cerr << "    non0 feature count: " <<  nonz << endl;
-    cerr << "           avg list sz: " << list_sz/(float)in_sz << endl;
-    cerr << "           avg f count: " << f_count/(float)list_sz << endl;
-  }
-
-  pair<score_t,score_t> remember;
-  remember.first = score_avg;
-  remember.second = model_avg;
-  all_scores.push_back(remember);
-  if (score_avg > max_score) {
-    max_score = score_avg;
-    best_it = t;
-  }
 
   time (&end);
-  float time_diff = difftime(end, start);
-  overall_time += time_diff;
-  if (!quiet) {
-    cerr << _p2 << _np << "(time " << time_diff/60. << " min, ";
-    cerr << time_diff/in_sz << " s/S)" << endl;
-  }
-  if (t+1 != T && !quiet) cerr << endl;
-
-  if (noup) break;
+  time_t time_diff = difftime(end, start);
+  total_time += time_diff;
+  cerr << _p2 << _np << "(time " << time_diff/60. << " min, ";
+  cerr << time_diff/input_sz << " s/S)" << endl;
+  if (t+1 != T) cerr << endl;
 
-  // write weights to file
-  if (select_weights == "best" || keep) {
+  if (keep) { // keep intermediate weights
     lambdas.init_vector(&decoder_weights);
     string w_fn = "weights." + boost::lexical_cast<string>(t) + ".gz";
     Weights::WriteToFile(w_fn, decoder_weights, true);
@@ -543,51 +209,19 @@ main(int argc, char** argv)
 
   } // outer loop
 
-  if (average) w_average /= (weight_t)T;
-
-  if (!noup) {
-    if (!quiet) cerr << endl << "Writing weights file to '" << output_fn << "' ..." << endl;
-    if (select_weights == "last" || average) { // last, average
-      WriteFile of(output_fn);
-      ostream& o = *of.stream();
-      o.precision(17);
-      o << _np;
-      if (average) {
-        for (SparseVector<weight_t>::iterator it = w_average.begin(); it != w_average.end(); ++it) {
-	      if (it->second == 0) continue;
-          o << FD::Convert(it->first) << '\t' << it->second << endl;
-        }
-      } else {
-        for (SparseVector<weight_t>::iterator it = lambdas.begin(); it != lambdas.end(); ++it) {
-	      if (it->second == 0) continue;
-          o << FD::Convert(it->first) << '\t' << it->second << endl;
-        }
-      }
-    } else if (select_weights == "VOID") { // do nothing with the weights
-    } else { // best
-      if (output_fn != "-") {
-        CopyFile("weights."+boost::lexical_cast<string>(best_it)+".gz", output_fn);
-      } else {
-        ReadFile bestw("weights."+boost::lexical_cast<string>(best_it)+".gz");
-        string o;
-        cout.precision(17);
-        cout << _np;
-        while(getline(*bestw, o)) cout << o << endl;
-      }
-      if (!keep) {
-        for (unsigned i = 0; i < T; i++) {
-          string s = "weights." + boost::lexical_cast<string>(i) + ".gz";
-          unlink(s.c_str());
-        }
-      }
-    }
-    if (!quiet) cerr << "done" << endl;
+  // final weights
+  if (average) {
+    w_average /= (weight_t)T;
+    w_average.init_vector(decoder_weights);
+  } else if (!keep) {
+    lambdas.init_vector(decoder_weights);
   }
+  Weights::WriteToFile(output_fn, decoder_weights, true);
 
-  if (!quiet) {
-    cerr << _p5 << _np << endl << "---" << endl << "Best iteration: ";
-    cerr << best_it+1 << " [SCORE = " << max_score << "]." << endl;
-    cerr << "This took " << overall_time/60. << " min." << endl;
-  }
+  cerr << _p5 << _np << endl << "---" << endl << "Best iteration: ";
+  cerr << best_iteration+1 << " [GOLD = " << best << "]." << endl;
+  cerr << "This took " << total_time/60. << " min." << endl;
+
+  return 0;
 }
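Note: the pair generation and per-pair update loop that used to live inline in main() now sits behind CollectUpdates() from the new update.h, which this page does not include. As a minimal sketch of what such a margin-perceptron update amounts to (the function name CollectUpdatesSketch and the exact pair enumeration are assumptions, not the committed code; ScoredHyp, weight_t and SparseVector are the types from dtrain.h):

    // Minimal sketch, NOT the actual update.h: walk all hypothesis pairs
    // of a scored k-best list; whenever the model fails to separate the
    // BLEU-better ('gold') hypothesis from the worse one by more than
    // 'margin', accumulate the feature difference in 'updates'.
    size_t
    CollectUpdatesSketch(vector<ScoredHyp>* s,
                         SparseVector<weight_t>& updates,
                         weight_t margin)
    {
      size_t num_up = 0;
      for (size_t i = 0; i+1 < s->size(); i++) {
        for (size_t j = i+1; j < s->size(); j++) {
          if ((*s)[i].gold == (*s)[j].gold)
            continue; // no preference, nothing to learn from this pair
          const ScoredHyp& hi = ((*s)[i].gold > (*s)[j].gold) ? (*s)[i] : (*s)[j];
          const ScoredHyp& lo = ((*s)[i].gold > (*s)[j].gold) ? (*s)[j] : (*s)[i];
          if (hi.model - lo.model > margin)
            continue; // already ranked correctly with enough slack
          updates += hi.f - lo.f; // push towards hi, away from lo
          num_up++;
        }
      }
      return num_up;
    }

main() then applies the summed difference vector once per sentence via lambdas.plus_eq_v_times_s(updates, eta), i.e. w <- w + eta * sum(f(hi) - f(lo)), and the clip-style L1 step that follows shrinks only the features touched by that update towards zero, by at most l1_reg each.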
diff --git a/training/dtrain/dtrain.h b/training/dtrain/dtrain.h
index 2b466930..728b0698 100644
--- a/training/dtrain/dtrain.h
+++ b/training/dtrain/dtrain.h
@@ -1,9 +1,6 @@
 #ifndef _DTRAIN_H_
 #define _DTRAIN_H_
 
-#define DTRAIN_DOTS 10 // after how many inputs to display a '.'
-#define DTRAIN_SCALE 100000
-
 #include <iomanip>
 #include <climits>
 #include <string.h>
@@ -25,20 +22,17 @@ namespace po = boost::program_options;
 namespace dtrain
 {
 
-typedef double score_t;
-
 struct ScoredHyp
 {
-  vector<WordID> w;
+  vector<WordID>         w;
   SparseVector<weight_t> f;
-  score_t model, score;
-  unsigned rank;
+  weight_t               model, gold;
+  size_t                 rank;
 };
 
 inline void
 RegisterAndConvert(const vector<string>& strs, vector<WordID>& ids)
 {
-  vector<string>::const_iterator it;
   for (auto s: strs)
     ids.push_back(TD::Convert(s));
 }
@@ -46,7 +40,7 @@ RegisterAndConvert(const vector<string>& strs, vector<WordID>& ids)
 inline void
 PrintWordIDVec(vector<WordID>& v, ostream& os=cerr)
 {
-  for (unsigned i = 0; i < v.size(); i++) {
+  for (size_t i = 0; i < v.size(); i++) {
     os << TD::Convert(v[i]);
     if (i < v.size()-1) os << " ";
   }
@@ -57,12 +51,45 @@
 inline ostream& _p(ostream& out)  { return out << setiosflags(ios::showpos); }
 inline ostream& _p2(ostream& out) { return out << setprecision(2); }
 inline ostream& _p5(ostream& out) { return out << setprecision(5); }
 
-template<typename T>
-inline T
-sign(T z)
+bool
+dtrain_init(int argc, char** argv, po::variables_map* conf)
 {
-  if (z == 0) return 0;
-  return z < 0 ? -1 : +1;
+  po::options_description ini("Configuration File Options");
+  ini.add_options()
+    ("bitext,b",          po::value<string>(),                                                   "bitext")
+    ("decoder_config,C",  po::value<string>(),                           "configuration file for decoder")
+    ("iterations,T",      po::value<size_t>()->default_value(10),    "number of iterations T (per shard)")
+    ("k",                 po::value<size_t>()->default_value(100),                   "size of kbest list")
+    ("learning_rate,l",   po::value<weight_t>()->default_value(1.0),                      "learning rate")
+    ("l1_reg,r",          po::value<weight_t>()->default_value(0.),          "l1 regularization strength")
+    ("error_margin,m",    po::value<weight_t>()->default_value(0.),        "margin for margin perceptron")
+    ("N",                 po::value<size_t>()->default_value(4),               "N for BLEU approximation")
+    ("input_weights,w",   po::value<string>(),                                       "input weights file")
+    ("average,a",         po::value<bool>()->default_value(false),               "output average weights")
+    ("keep,K",            po::value<bool>()->default_value(false),   "output a weight file per iteration")
+    ("output,o",          po::value<string>()->default_value("-"),  "output weights file, '-' for STDOUT")
+    ("print_weights,P",   po::value<string>()->default_value("EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV"),
+                                                          "list of weights to print after each iteration");
+  po::options_description cl("Command Line Options");
+  cl.add_options()
+    ("config,c", po::value<string>(), "dtrain config file");
+  cl.add(ini);
+  po::store(parse_command_line(argc, argv, cl), *conf);
+  if (conf->count("config")) {
+    ifstream f((*conf)["config"].as<string>().c_str());
+    po::store(po::parse_config_file(f, ini), *conf);
+  }
+  po::notify(*conf);
+  if (!conf->count("decoder_config")) {
+    cerr << "Missing decoder configuration." << endl;
+    return false;
+  }
+  if (!conf->count("bitext")) {
+    cerr << "No training data given." << endl;
+    return false;
+  }
+
+  return true;
 }
 
 } // namespace
diff --git a/training/dtrain/examples/parallelized/README b/training/dtrain/examples/parallelized/README
index 2fb3b54e..c4addd81 100644
--- a/training/dtrain/examples/parallelized/README
+++ b/training/dtrain/examples/parallelized/README
@@ -1,5 +1,5 @@
 run for example
-  ../../parallelize.rb -c dtrain.ini -s 4 -e 3 -z -d ../../dtrain -p 2 -i in
+  ../../parallelize.rb -c dtrain.ini -s 4 -e 3 -d ../../dtrain -p 2 -i in
 
 final weights will be in the file work/weights.2
diff --git a/training/dtrain/examples/parallelized/dtrain.ini b/training/dtrain/examples/parallelized/dtrain.ini
index 0b0932d6..9fc205a3 100644
--- a/training/dtrain/examples/parallelized/dtrain.ini
+++ b/training/dtrain/examples/parallelized/dtrain.ini
@@ -1,14 +1,7 @@
 k=100
 N=4
 learning_rate=0.0001
-gamma=0
-loss_margin=1.0
-epochs=1
-scorer=stupid_bleu
-sample_from=kbest
-filter=uniq
-pair_sampling=XYX
-hi_lo=0.1
-select_weights=last
-print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
+error_margin=1.0
+iterations=1
 decoder_config=cdec.ini
+print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
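Note: the pruned example configuration mirrors the pruned option set of dtrain_init() above: gamma, the sampling and pair-selection knobs, and select_weights are gone, while error_margin and iterations replace loss_margin and epochs. Since dtrain_init() merges the config file with the command line, a single shard could also be run directly; a hypothetical invocation in the style of the README (with 'in' being the bitext file name from the README example):

    ../../dtrain -c dtrain.ini -b in -o weights.gz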
diff --git a/training/dtrain/examples/parallelized/work/out.0.0 b/training/dtrain/examples/parallelized/work/out.0.0
index 9154c906..77749404 100644
--- a/training/dtrain/examples/parallelized/work/out.0.0
+++ b/training/dtrain/examples/parallelized/work/out.0.0
@@ -1,65 +1,43 @@
-                cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
 Reading ../standard/nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 4087834873
-
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 1
-                   batch 0
-                  scorer 'stupid_bleu'
-             sample from 'kbest'
-                  filter 'uniq'
            learning rate 0.0001
-                   gamma 0
-             loss margin 1
-       faster perceptron 0
-                   pairs 'XYX'
-                   hi lo 0.1
-          pair threshold 0
-          select weights 'last'
-                  l1 reg 0 'none'
-                    pclr no
-               max pairs 4294967295
-                  repeat 1
-                cdec cfg 'cdec.ini'
-                   input ''
+            error margin 1
+                  l1 reg 0
+            decoder conf 'cdec.ini'
+                   input 'work/shard.0.0.in'
                   output 'work/weights.0.0'
-(a dot represents 10 inputs)
+(a dot per input)
 Iteration #1 of 1.
-  3
+ .... 3
 WEIGHTS
-              Glue = +0.257
-       WordPenalty = +0.026926
-     LanguageModel = +0.67342
- LanguageModel_OOV = -0.046
-     PhraseModel_0 = +0.25329
-     PhraseModel_1 = +0.20036
-     PhraseModel_2 = +0.00060731
-     PhraseModel_3 = +0.65578
-     PhraseModel_4 = +0.47916
-     PhraseModel_5 = +0.004
-     PhraseModel_6 = +0.1829
-       PassThrough = -0.082
+              Glue = +0.3404
+       WordPenalty = -0.017632
+     LanguageModel = +0.72958
+ LanguageModel_OOV = -0.235
+     PhraseModel_0 = -0.43721
+     PhraseModel_1 = +1.01
+     PhraseModel_2 = +1.3525
+     PhraseModel_3 = -0.25541
+     PhraseModel_4 = -0.78115
+     PhraseModel_5 = +0
+     PhraseModel_6 = -0.3681
+       PassThrough = -0.3304
         ---
-       1best avg score: 0.04518 (+0.04518)
- 1best avg model score: 32.803 (+32.803)
-           avg # pairs: 1266.3
-        avg # rank err: 857
-     avg # margin viol: 386.67
-       k-best loss imp: 100%
-    non0 feature count: 12
+       1best avg score: 0.19474 (+0.19474)
+ 1best avg model score: 0.52232
+           avg # pairs: 2513
+   non-0 feature count: 11
            avg list sz: 100
-           avg f count: 10.853
-(time 0.47 min, 9.3 s/S)
-
-Writing weights file to 'work/weights.0.0' ...
-done
+           avg f count: 11.42
+(time 0.32 min, 6 s/S)
 
 ---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.04518].
-This took 0.46667 min.
+Best iteration: 1 [GOLD = 0.19474].
+This took 0.31667 min.
diff --git a/training/dtrain/examples/parallelized/work/out.0.1 b/training/dtrain/examples/parallelized/work/out.0.1
index 0dbc7bd3..d0dee623 100644
--- a/training/dtrain/examples/parallelized/work/out.0.1
+++ b/training/dtrain/examples/parallelized/work/out.0.1
@@ -1,66 +1,44 @@
-                cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
 Reading ../standard/nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 2283043509
-
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 1
-                   batch 0
-                  scorer 'stupid_bleu'
-             sample from 'kbest'
-                  filter 'uniq'
            learning rate 0.0001
-                   gamma 0
-             loss margin 1
-       faster perceptron 0
-                   pairs 'XYX'
-                   hi lo 0.1
-          pair threshold 0
-          select weights 'last'
-                  l1 reg 0 'none'
-                    pclr no
-               max pairs 4294967295
-                  repeat 1
-                cdec cfg 'cdec.ini'
-                   input ''
+            error margin 1
+                  l1 reg 0
+            decoder conf 'cdec.ini'
+                   input 'work/shard.0.0.in'
                   output 'work/weights.0.1'
               weights in 'work/weights.0'
-(a dot represents 10 inputs)
+(a dot per input)
 Iteration #1 of 1.
-  3
+ .... 3
 WEIGHTS
-              Glue = -0.17905
-       WordPenalty = +0.062126
-     LanguageModel = +0.66825
- LanguageModel_OOV = -0.15248
-     PhraseModel_0 = -0.55811
-     PhraseModel_1 = +0.12741
-     PhraseModel_2 = +0.60388
-     PhraseModel_3 = -0.44464
-     PhraseModel_4 = -0.63137
-     PhraseModel_5 = -0.0084
-     PhraseModel_6 = -0.20165
-       PassThrough = -0.23468
+              Glue = -0.40908
+       WordPenalty = +0.12967
+     LanguageModel = +0.39892
+ LanguageModel_OOV = -0.6314
+     PhraseModel_0 = -0.63992
+     PhraseModel_1 = +0.74198
+     PhraseModel_2 = +1.3096
+     PhraseModel_3 = -0.1216
+     PhraseModel_4 = -1.2274
+     PhraseModel_5 = +0.02435
+     PhraseModel_6 = -0.21093
+       PassThrough = -0.66155
         ---
-       1best avg score: 0.14066 (+0.14066)
- 1best avg model score: -37.614 (-37.614)
-           avg # pairs: 1244.7
-        avg # rank err: 728
-     avg # margin viol: 516.67
-       k-best loss imp: 100%
-    non0 feature count: 12
+       1best avg score: 0.15735 (+0.15735)
+ 1best avg model score: 46.831
+           avg # pairs: 2132.3
+   non-0 feature count: 12
            avg list sz: 100
-           avg f count: 11.507
-(time 0.45 min, 9 s/S)
-
-Writing weights file to 'work/weights.0.1' ...
-done
+           avg f count: 10.64
+(time 0.38 min, 7 s/S)
 
 ---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.14066].
-This took 0.45 min.
+Best iteration: 1 [GOLD = 0.15735].
+This took 0.38333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.0.2 b/training/dtrain/examples/parallelized/work/out.0.2
index fcecc7e1..9c4b110b 100644
--- a/training/dtrain/examples/parallelized/work/out.0.2
+++ b/training/dtrain/examples/parallelized/work/out.0.2
@@ -1,66 +1,44 @@
-                cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
 Reading ../standard/nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 3693132895
-
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 1
-                   batch 0
-                  scorer 'stupid_bleu'
-             sample from 'kbest'
-                  filter 'uniq'
            learning rate 0.0001
-                   gamma 0
-             loss margin 1
-       faster perceptron 0
-                   pairs 'XYX'
-                   hi lo 0.1
-          pair threshold 0
-          select weights 'last'
-                  l1 reg 0 'none'
-                    pclr no
-               max pairs 4294967295
-                  repeat 1
-                cdec cfg 'cdec.ini'
-                   input ''
+            error margin 1
+                  l1 reg 0
+            decoder conf 'cdec.ini'
+                   input 'work/shard.0.0.in'
                   output 'work/weights.0.2'
               weights in 'work/weights.1'
-(a dot represents 10 inputs)
+(a dot per input)
 Iteration #1 of 1.
-  3
+ .... 3
 WEIGHTS
-              Glue = -0.019275
-       WordPenalty = +0.022192
-     LanguageModel = +0.40688
- LanguageModel_OOV = -0.36397
-     PhraseModel_0 = -0.36273
-     PhraseModel_1 = +0.56432
-     PhraseModel_2 = +0.85638
-     PhraseModel_3 = -0.20222
-     PhraseModel_4 = -0.48295
-     PhraseModel_5 = +0.03145
-     PhraseModel_6 = -0.26092
-       PassThrough = -0.38122
+              Glue = -0.44422
+       WordPenalty = +0.1032
+     LanguageModel = +0.66474
+ LanguageModel_OOV = -0.62252
+     PhraseModel_0 = -0.59993
+     PhraseModel_1 = +0.78992
+     PhraseModel_2 = +1.3149
+     PhraseModel_3 = +0.21434
+     PhraseModel_4 = -1.0174
+     PhraseModel_5 = +0.02435
+     PhraseModel_6 = -0.18452
+       PassThrough = -0.65268
         ---
-       1best avg score: 0.18982 (+0.18982)
- 1best avg model score: 1.7096 (+1.7096)
-           avg # pairs: 1524.3
-        avg # rank err: 813.33
-     avg # margin viol: 702.67
-       k-best loss imp: 100%
-    non0 feature count: 12
+       1best avg score: 0.24722 (+0.24722)
+ 1best avg model score: 61.971
+           avg # pairs: 2017.7
+   non-0 feature count: 12
            avg list sz: 100
-           avg f count: 11.32
-(time 0.53 min, 11 s/S)
-
-Writing weights file to 'work/weights.0.2' ...
-done
+           avg f count: 10.42
+(time 0.3 min, 6 s/S)
 
 ---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.18982].
-This took 0.53333 min.
+Best iteration: 1 [GOLD = 0.24722].
+This took 0.3 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.0 b/training/dtrain/examples/parallelized/work/out.1.0
index 595dfc94..3dc4dca6 100644
--- a/training/dtrain/examples/parallelized/work/out.1.0
+++ b/training/dtrain/examples/parallelized/work/out.1.0
@@ -1,65 +1,43 @@
-                cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
 Reading ../standard/nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 859043351
-
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 1
-                   batch 0
-                  scorer 'stupid_bleu'
-             sample from 'kbest'
-                  filter 'uniq'
            learning rate 0.0001
-                   gamma 0
-             loss margin 1
-       faster perceptron 0
-                   pairs 'XYX'
-                   hi lo 0.1
-          pair threshold 0
-          select weights 'last'
-                  l1 reg 0 'none'
-                    pclr no
-               max pairs 4294967295
-                  repeat 1
-                cdec cfg 'cdec.ini'
-                   input ''
+            error margin 1
+                  l1 reg 0
+            decoder conf 'cdec.ini'
+                   input 'work/shard.1.0.in'
                   output 'work/weights.1.0'
-(a dot represents 10 inputs)
+(a dot per input)
 Iteration #1 of 1.
-  3
+ .... 3
 WEIGHTS
-              Glue = -0.3229
-       WordPenalty = +0.27969
-     LanguageModel = +1.3645
- LanguageModel_OOV = -0.0443
-     PhraseModel_0 = -0.19049
-     PhraseModel_1 = -0.077698
-     PhraseModel_2 = +0.058898
-     PhraseModel_3 = +0.017251
-     PhraseModel_4 = -1.5474
-     PhraseModel_5 = +0
-     PhraseModel_6 = -0.1818
-       PassThrough = -0.193
+              Glue = -0.2722
+       WordPenalty = +0.05433
+     LanguageModel = +0.69948
+ LanguageModel_OOV = -0.2641
+     PhraseModel_0 = -1.4208
+     PhraseModel_1 = -1.563
+     PhraseModel_2 = -0.21051
+     PhraseModel_3 = -0.17764
+     PhraseModel_4 = -1.6583
+     PhraseModel_5 = +0.0794
+     PhraseModel_6 = +0.1528
+       PassThrough = -0.2367
         ---
-       1best avg score: 0.070229 (+0.070229)
- 1best avg model score: -44.01 (-44.01)
-           avg # pairs: 1294
-        avg # rank err: 878.67
-     avg # margin viol: 350.67
-       k-best loss imp: 100%
-    non0 feature count: 11
+       1best avg score: 0.071329 (+0.071329)
+ 1best avg model score: -41.362
+           avg # pairs: 1862.3
+   non-0 feature count: 12
            avg list sz: 100
-           avg f count: 11.487
-(time 0.28 min, 5.7 s/S)
-
-Writing weights file to 'work/weights.1.0' ...
-done
+           avg f count: 11.847
+(time 0.28 min, 5 s/S)
 
 ---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.070229].
+Best iteration: 1 [GOLD = 0.071329].
 This took 0.28333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.1 b/training/dtrain/examples/parallelized/work/out.1.1
index 9346fc82..79ac35dc 100644
--- a/training/dtrain/examples/parallelized/work/out.1.1
+++ b/training/dtrain/examples/parallelized/work/out.1.1
@@ -1,66 +1,44 @@
-                cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
 Reading ../standard/nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 3557309480
-
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 1
-                   batch 0
-                  scorer 'stupid_bleu'
-             sample from 'kbest'
-                  filter 'uniq'
            learning rate 0.0001
-                   gamma 0
-             loss margin 1
-       faster perceptron 0
-                   pairs 'XYX'
-                   hi lo 0.1
-          pair threshold 0
-          select weights 'last'
-                  l1 reg 0 'none'
-                    pclr no
-               max pairs 4294967295
-                  repeat 1
-                cdec cfg 'cdec.ini'
-                   input ''
+            error margin 1
+                  l1 reg 0
+            decoder conf 'cdec.ini'
+                   input 'work/shard.1.0.in'
                   output 'work/weights.1.1'
               weights in 'work/weights.0'
-(a dot represents 10 inputs)
+(a dot per input)
 Iteration #1 of 1.
-  3
+ .... 3
 WEIGHTS
-              Glue = -0.26425
-       WordPenalty = +0.047881
-     LanguageModel = +0.78496
- LanguageModel_OOV = -0.49307
-     PhraseModel_0 = -0.58703
-     PhraseModel_1 = -0.33425
-     PhraseModel_2 = +0.20834
-     PhraseModel_3 = -0.043346
-     PhraseModel_4 = -0.60761
-     PhraseModel_5 = +0.123
-     PhraseModel_6 = -0.05415
-       PassThrough = -0.42167
+              Glue = -0.20488
+       WordPenalty = -0.0091745
+     LanguageModel = +0.79433
+ LanguageModel_OOV = -0.4309
+     PhraseModel_0 = -0.56242
+     PhraseModel_1 = +0.85363
+     PhraseModel_2 = +1.3458
+     PhraseModel_3 = -0.13095
+     PhraseModel_4 = -0.94762
+     PhraseModel_5 = +0.02435
+     PhraseModel_6 = -0.16003
+       PassThrough = -0.46105
         ---
-       1best avg score: 0.085952 (+0.085952)
- 1best avg model score: -45.175 (-45.175)
-           avg # pairs: 1180.7
-        avg # rank err: 668.33
-     avg # margin viol: 512.33
-       k-best loss imp: 100%
-    non0 feature count: 12
+       1best avg score: 0.13017 (+0.13017)
+ 1best avg model score: 14.53
+           avg # pairs: 1968
+   non-0 feature count: 12
            avg list sz: 100
-           avg f count: 12
-(time 0.27 min, 5.3 s/S)
-
-Writing weights file to 'work/weights.1.1' ...
-done
+           avg f count: 11
+(time 0.33 min, 6 s/S)
 
 ---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.085952].
-This took 0.26667 min.
+Best iteration: 1 [GOLD = 0.13017].
+This took 0.33333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.2 b/training/dtrain/examples/parallelized/work/out.1.2
index 08f07a75..8c4f8b03 100644
--- a/training/dtrain/examples/parallelized/work/out.1.2
+++ b/training/dtrain/examples/parallelized/work/out.1.2
@@ -1,66 +1,44 @@
-                cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
 Reading ../standard/nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 56743915
-
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 1
-                   batch 0
-                  scorer 'stupid_bleu'
-             sample from 'kbest'
-                  filter 'uniq'
            learning rate 0.0001
-                   gamma 0
-             loss margin 1
-       faster perceptron 0
-                   pairs 'XYX'
-                   hi lo 0.1
-          pair threshold 0
-          select weights 'last'
-                  l1 reg 0 'none'
-                    pclr no
-               max pairs 4294967295
-                  repeat 1
-                cdec cfg 'cdec.ini'
-                   input ''
+            error margin 1
+                  l1 reg 0
+            decoder conf 'cdec.ini'
+                   input 'work/shard.1.0.in'
                   output 'work/weights.1.2'
               weights in 'work/weights.1'
-(a dot represents 10 inputs)
+(a dot per input)
 Iteration #1 of 1.
-  3
+ .... 3
 WEIGHTS
-              Glue = -0.23608
-       WordPenalty = +0.10931
-     LanguageModel = +0.81339
- LanguageModel_OOV = -0.33238
-     PhraseModel_0 = -0.53685
-     PhraseModel_1 = -0.049658
-     PhraseModel_2 = +0.40277
-     PhraseModel_3 = +0.14601
-     PhraseModel_4 = -0.72851
-     PhraseModel_5 = +0.03475
-     PhraseModel_6 = -0.27192
-       PassThrough = -0.34763
+              Glue = -0.49853
+       WordPenalty = +0.07636
+     LanguageModel = +1.3183
+ LanguageModel_OOV = -0.60902
+     PhraseModel_0 = -0.22481
+     PhraseModel_1 = +0.86369
+     PhraseModel_2 = +1.0747
+     PhraseModel_3 = +0.18002
+     PhraseModel_4 = -0.84661
+     PhraseModel_5 = +0.02435
+     PhraseModel_6 = +0.11247
+       PassThrough = -0.63918
         ---
-       1best avg score: 0.10073 (+0.10073)
- 1best avg model score: -38.422 (-38.422)
-           avg # pairs: 1505.3
-        avg # rank err: 777
-     avg # margin viol: 691.67
-       k-best loss imp: 100%
-    non0 feature count: 12
+       1best avg score: 0.15478 (+0.15478)
+ 1best avg model score: -7.2154
+           avg # pairs: 1776
+   non-0 feature count: 12
            avg list sz: 100
-           avg f count: 12
-(time 0.35 min, 7 s/S)
-
-Writing weights file to 'work/weights.1.2' ...
-done
+           avg f count: 11.327
+(time 0.27 min, 5 s/S)
 
 ---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.10073].
-This took 0.35 min.
+Best iteration: 1 [GOLD = 0.15478].
+This took 0.26667 min.
diff --git a/training/dtrain/examples/parallelized/work/out.2.0 b/training/dtrain/examples/parallelized/work/out.2.0
index 25ef6d4e..07c85963 100644
--- a/training/dtrain/examples/parallelized/work/out.2.0
+++ b/training/dtrain/examples/parallelized/work/out.2.0
@@ -1,65 +1,43 @@
-                cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
 Reading ../standard/nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 2662215673
-
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 1
-                   batch 0
-                  scorer 'stupid_bleu'
-             sample from 'kbest'
-                  filter 'uniq'
            learning rate 0.0001
-                   gamma 0
-             loss margin 1
-       faster perceptron 0
-                   pairs 'XYX'
-                   hi lo 0.1
-          pair threshold 0
-          select weights 'last'
-                  l1 reg 0 'none'
-                    pclr no
-               max pairs 4294967295
-                  repeat 1
-                cdec cfg 'cdec.ini'
-                   input ''
+            error margin 1
+                  l1 reg 0
+            decoder conf 'cdec.ini'
+                   input 'work/shard.2.0.in'
                   output 'work/weights.2.0'
-(a dot represents 10 inputs)
+(a dot per input)
 Iteration #1 of 1.
-  3
+ .... 3
 WEIGHTS
-              Glue = -0.1259
-       WordPenalty = +0.048294
-     LanguageModel = +0.36254
- LanguageModel_OOV = -0.1228
-     PhraseModel_0 = +0.26357
-     PhraseModel_1 = +0.24793
-     PhraseModel_2 = +0.0063763
-     PhraseModel_3 = -0.18966
-     PhraseModel_4 = -0.226
+              Glue = -0.2109
+       WordPenalty = +0.14922
+     LanguageModel = +0.79686
+ LanguageModel_OOV = -0.6627
+     PhraseModel_0 = +0.37999
+     PhraseModel_1 = +0.69213
+     PhraseModel_2 = +0.3422
+     PhraseModel_3 = +1.1426
+     PhraseModel_4 = -0.55413
     PhraseModel_5 = +0
-     PhraseModel_6 = +0.0743
-       PassThrough = -0.1335
+     PhraseModel_6 = +0.0676
+       PassThrough = -0.6343
         ---
-       1best avg score: 0.072836 (+0.072836)
- 1best avg model score: -0.56296 (-0.56296)
-           avg # pairs: 1094.7
-        avg # rank err: 658
-     avg # margin viol: 436.67
-       k-best loss imp: 100%
-    non0 feature count: 11
+       1best avg score: 0.072374 (+0.072374)
+ 1best avg model score: -27.384
+           avg # pairs: 2582
+   non-0 feature count: 11
            avg list sz: 100
-           avg f count: 10.813
-(time 0.13 min, 2.7 s/S)
-
-Writing weights file to 'work/weights.2.0' ...
-done
+           avg f count: 11.54
+(time 0.32 min, 6 s/S)
 
 ---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.072836].
-This took 0.13333 min.
+Best iteration: 1 [GOLD = 0.072374].
+This took 0.31667 min.
diff --git a/training/dtrain/examples/parallelized/work/out.2.1 b/training/dtrain/examples/parallelized/work/out.2.1
index 8e4efde9..c54bb1b1 100644
--- a/training/dtrain/examples/parallelized/work/out.2.1
+++ b/training/dtrain/examples/parallelized/work/out.2.1
@@ -1,66 +1,44 @@
-                cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
 Reading ../standard/nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 3092904479
-
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 1
-                   batch 0
-                  scorer 'stupid_bleu'
-             sample from 'kbest'
-                  filter 'uniq'
            learning rate 0.0001
-                   gamma 0
-             loss margin 1
-       faster perceptron 0
-                   pairs 'XYX'
-                   hi lo 0.1
-          pair threshold 0
-          select weights 'last'
-                  l1 reg 0 'none'
-                    pclr no
-               max pairs 4294967295
-                  repeat 1
-                cdec cfg 'cdec.ini'
-                   input ''
+            error margin 1
+                  l1 reg 0
+            decoder conf 'cdec.ini'
+                   input 'work/shard.2.0.in'
                   output 'work/weights.2.1'
               weights in 'work/weights.0'
-(a dot represents 10 inputs)
+(a dot per input)
 Iteration #1 of 1.
-  3
+ .... 3
 WEIGHTS
-              Glue = -0.10385
-       WordPenalty = +0.038717
-     LanguageModel = +0.49413
- LanguageModel_OOV = -0.24887
-     PhraseModel_0 = -0.32102
-     PhraseModel_1 = +0.34413
-     PhraseModel_2 = +0.62366
-     PhraseModel_3 = -0.49337
-     PhraseModel_4 = -0.77005
-     PhraseModel_5 = +0.007
-     PhraseModel_6 = -0.05055
-       PassThrough = -0.23928
+              Glue = -0.76608
+       WordPenalty = +0.15938
+     LanguageModel = +1.5897
+ LanguageModel_OOV = -0.521
+     PhraseModel_0 = -0.58348
+     PhraseModel_1 = +0.29828
+     PhraseModel_2 = +0.78493
+     PhraseModel_3 = +0.083222
+     PhraseModel_4 = -0.93843
+     PhraseModel_5 = +0.02435
+     PhraseModel_6 = -0.27382
+       PassThrough = -0.55115
         ---
-       1best avg score: 0.10245 (+0.10245)
- 1best avg model score: -20.384 (-20.384)
-           avg # pairs: 1741.7
-        avg # rank err: 953.67
-     avg # margin viol: 585.33
-       k-best loss imp: 100%
-    non0 feature count: 12
+       1best avg score: 0.12881 (+0.12881)
+ 1best avg model score: -9.6731
+           avg # pairs: 2020.3
+   non-0 feature count: 12
            avg list sz: 100
-           avg f count: 11.977
-(time 0.12 min, 2.3 s/S)
-
-Writing weights file to 'work/weights.2.1' ...
-done
+           avg f count: 12
+(time 0.32 min, 6 s/S)
 
 ---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.10245].
-This took 0.11667 min.
+Best iteration: 1 [GOLD = 0.12881].
+This took 0.31667 min.
diff --git a/training/dtrain/examples/parallelized/work/out.2.2 b/training/dtrain/examples/parallelized/work/out.2.2
index e0ca2110..f5d6229f 100644
--- a/training/dtrain/examples/parallelized/work/out.2.2
+++ b/training/dtrain/examples/parallelized/work/out.2.2
@@ -1,66 +1,44 @@
-                cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
 Reading ../standard/nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 2803362953
-
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 1
-                   batch 0
-                  scorer 'stupid_bleu'
-             sample from 'kbest'
-                  filter 'uniq'
            learning rate 0.0001
-                   gamma 0
-             loss margin 1
-       faster perceptron 0
-                   pairs 'XYX'
-                   hi lo 0.1
-          pair threshold 0
-          select weights 'last'
-                  l1 reg 0 'none'
-                    pclr no
-               max pairs 4294967295
-                  repeat 1
-                cdec cfg 'cdec.ini'
-                   input ''
+            error margin 1
+                  l1 reg 0
+            decoder conf 'cdec.ini'
+                   input 'work/shard.2.0.in'
                   output 'work/weights.2.2'
               weights in 'work/weights.1'
-(a dot represents 10 inputs)
+(a dot per input)
 Iteration #1 of 1.
-  3
+ .... 3
 WEIGHTS
-              Glue = -0.32907
-       WordPenalty = +0.049596
-     LanguageModel = +0.33496
- LanguageModel_OOV = -0.44357
-     PhraseModel_0 = -0.3068
-     PhraseModel_1 = +0.59376
-     PhraseModel_2 = +0.86416
-     PhraseModel_3 = -0.21072
-     PhraseModel_4 = -0.65734
-     PhraseModel_5 = +0.03475
-     PhraseModel_6 = -0.10653
-       PassThrough = -0.46082
+              Glue = -0.90863
+       WordPenalty = +0.10819
+     LanguageModel = +0.5239
+ LanguageModel_OOV = -0.41623
+     PhraseModel_0 = -0.86868
+     PhraseModel_1 = +0.40784
+     PhraseModel_2 = +1.1793
+     PhraseModel_3 = -0.24698
+     PhraseModel_4 = -1.2353
+     PhraseModel_5 = +0.03375
+     PhraseModel_6 = -0.17883
+       PassThrough = -0.44638
         ---
-       1best avg score: 0.25055 (+0.25055)
- 1best avg model score: -1.4459 (-1.4459)
-           avg # pairs: 1689
-        avg # rank err: 755.67
-     avg # margin viol: 829.33
-       k-best loss imp: 100%
-    non0 feature count: 12
+       1best avg score: 0.12788 (+0.12788)
+ 1best avg model score: 41.302
+           avg # pairs: 2246.3
+   non-0 feature count: 12
            avg list sz: 100
-           avg f count: 10.53
-(time 0.13 min, 2.7 s/S)
-
-Writing weights file to 'work/weights.2.2' ...
-done
+           avg f count: 10.98
+(time 0.35 min, 7 s/S)
 
 ---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.25055].
-This took 0.13333 min.
+Best iteration: 1 [GOLD = 0.12788].
+This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.3.0 b/training/dtrain/examples/parallelized/work/out.3.0
index 3c074f04..fa499523 100644
--- a/training/dtrain/examples/parallelized/work/out.3.0
+++ b/training/dtrain/examples/parallelized/work/out.3.0
@@ -1,65 +1,43 @@
-                cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
 Reading ../standard/nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 316107185
-
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 1
-                   batch 0
-                  scorer 'stupid_bleu'
-             sample from 'kbest'
-                  filter 'uniq'
            learning rate 0.0001
-                   gamma 0
-             loss margin 1
-       faster perceptron 0
-                   pairs 'XYX'
-                   hi lo 0.1
-          pair threshold 0
-          select weights 'last'
-                  l1 reg 0 'none'
-                    pclr no
-               max pairs 4294967295
-                  repeat 1
-                cdec cfg 'cdec.ini'
-                   input ''
+            error margin 1
+                  l1 reg 0
+            decoder conf 'cdec.ini'
+                   input 'work/shard.3.0.in'
                   output 'work/weights.3.0'
-(a dot represents 10 inputs)
+(a dot per input)
 Iteration #1 of 1.
-  1
+ ..
1  WEIGHTS -              Glue = +0.046 -       WordPenalty = +0.17328 -     LanguageModel = +1.1667 - LanguageModel_OOV = +0.066 -     PhraseModel_0 = -1.1694 -     PhraseModel_1 = -0.9883 -     PhraseModel_2 = +0.036205 -     PhraseModel_3 = -0.77387 -     PhraseModel_4 = -1.5019 -     PhraseModel_5 = +0.024 -     PhraseModel_6 = -0.514 -       PassThrough = +0.031 +              Glue = -0.09 +       WordPenalty = +0.32442 +     LanguageModel = +2.5769 + LanguageModel_OOV = -0.009 +     PhraseModel_0 = -0.58972 +     PhraseModel_1 = +0.063691 +     PhraseModel_2 = +0.5366 +     PhraseModel_3 = +0.12867 +     PhraseModel_4 = -1.9801 +     PhraseModel_5 = +0.018 +     PhraseModel_6 = -0.486 +       PassThrough = -0.09          --- -       1best avg score: 0.032916 (+0.032916) - 1best avg model score: 0 (+0) -           avg # pairs: 900 -        avg # rank err: 900 -     avg # margin viol: 0 -       k-best loss imp: 100% -    non0 feature count: 12 +       1best avg score: 0.034204 (+0.034204) + 1best avg model score: 0 +           avg # pairs: 1700 +   non-0 feature count: 12             avg list sz: 100 -           avg f count: 11.72 -(time 0.23 min, 14 s/S) - -Writing weights file to 'work/weights.3.0' ... -done +           avg f count: 10.8 +(time 0.1 min, 6 s/S)  --- -Best iteration: 1 [SCORE 'stupid_bleu'=0.032916]. -This took 0.23333 min. +Best iteration: 1 [GOLD = 0.034204]. +This took 0.1 min. diff --git a/training/dtrain/examples/parallelized/work/out.3.1 b/training/dtrain/examples/parallelized/work/out.3.1 index 241d3455..c4b3aa3c 100644 --- a/training/dtrain/examples/parallelized/work/out.3.1 +++ b/training/dtrain/examples/parallelized/work/out.3.1 @@ -1,66 +1,44 @@ -                cdec cfg 'cdec.ini'  Loading the LM will be faster if you build a binary file.  Reading ../standard/nc-wmt11.en.srilm.gz  ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100  **************************************************************************************************** -Seeding random number sequence to 353677750 -  dtrain  Parameters:                         k 100                         N 4                         T 1 -                   batch 0 -                  scorer 'stupid_bleu' -             sample from 'kbest' -                  filter 'uniq'             learning rate 0.0001 -                   gamma 0 -             loss margin 1 -       faster perceptron 0 -                   pairs 'XYX' -                   hi lo 0.1 -          pair threshold 0 -          select weights 'last' -                  l1 reg 0 'none' -                    pclr no -               max pairs 4294967295 -                  repeat 1 -                cdec cfg 'cdec.ini' -                   input '' +            error margin 1 +                  l1 reg 0 +            decoder conf 'cdec.ini' +                   input 'work/shard.3.0.in'                    output 'work/weights.3.1'                weights in 'work/weights.0' -(a dot represents 10 inputs) +(a dot per input)  Iteration #1 of 1. -  1 + .. 
1  WEIGHTS -              Glue = -0.08475 -       WordPenalty = +0.11151 -     LanguageModel = +1.0635 - LanguageModel_OOV = -0.11468 -     PhraseModel_0 = -0.062922 -     PhraseModel_1 = +0.0035552 -     PhraseModel_2 = +0.039692 -     PhraseModel_3 = +0.080265 -     PhraseModel_4 = -0.57787 -     PhraseModel_5 = +0.0174 -     PhraseModel_6 = -0.17095 -       PassThrough = -0.18248 +              Glue = +0.31832 +       WordPenalty = +0.11139 +     LanguageModel = +0.95438 + LanguageModel_OOV = -0.0608 +     PhraseModel_0 = -0.98113 +     PhraseModel_1 = -0.090531 +     PhraseModel_2 = +0.79088 +     PhraseModel_3 = -0.57623 +     PhraseModel_4 = -1.4382 +     PhraseModel_5 = +0.02435 +     PhraseModel_6 = -0.10812 +       PassThrough = -0.09095          --- -       1best avg score: 0.16117 (+0.16117) - 1best avg model score: -67.89 (-67.89) -           avg # pairs: 1411 -        avg # rank err: 460 -     avg # margin viol: 951 -       k-best loss imp: 100% -    non0 feature count: 12 +       1best avg score: 0.084989 (+0.084989) + 1best avg model score: -52.323 +           avg # pairs: 2487 +   non-0 feature count: 12             avg list sz: 100             avg f count: 12 -(time 0.22 min, 13 s/S) - -Writing weights file to 'work/weights.3.1' ... -done +(time 0.1 min, 6 s/S)  --- -Best iteration: 1 [SCORE 'stupid_bleu'=0.16117]. -This took 0.21667 min. +Best iteration: 1 [GOLD = 0.084989]. +This took 0.1 min. diff --git a/training/dtrain/examples/parallelized/work/out.3.2 b/training/dtrain/examples/parallelized/work/out.3.2 index b995daf5..eb27dac2 100644 --- a/training/dtrain/examples/parallelized/work/out.3.2 +++ b/training/dtrain/examples/parallelized/work/out.3.2 @@ -1,66 +1,44 @@ -                cdec cfg 'cdec.ini'  Loading the LM will be faster if you build a binary file.  Reading ../standard/nc-wmt11.en.srilm.gz  ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100  **************************************************************************************************** -Seeding random number sequence to 3001145976 -  dtrain  Parameters:                         k 100                         N 4                         T 1 -                   batch 0 -                  scorer 'stupid_bleu' -             sample from 'kbest' -                  filter 'uniq'             learning rate 0.0001 -                   gamma 0 -             loss margin 1 -       faster perceptron 0 -                   pairs 'XYX' -                   hi lo 0.1 -          pair threshold 0 -          select weights 'last' -                  l1 reg 0 'none' -                    pclr no -               max pairs 4294967295 -                  repeat 1 -                cdec cfg 'cdec.ini' -                   input '' +            error margin 1 +                  l1 reg 0 +            decoder conf 'cdec.ini' +                   input 'work/shard.3.0.in'                    output 'work/weights.3.2'                weights in 'work/weights.1' -(a dot represents 10 inputs) +(a dot per input)  Iteration #1 of 1. -  1 + .. 
1  WEIGHTS -              Glue = -0.13247 -       WordPenalty = +0.053592 -     LanguageModel = +0.72105 - LanguageModel_OOV = -0.30827 -     PhraseModel_0 = -0.37053 -     PhraseModel_1 = +0.17551 -     PhraseModel_2 = +0.5 -     PhraseModel_3 = -0.1459 -     PhraseModel_4 = -0.59563 -     PhraseModel_5 = +0.03475 -     PhraseModel_6 = -0.11143 -       PassThrough = -0.32553 +              Glue = -0.12993 +       WordPenalty = +0.13651 +     LanguageModel = +0.58946 + LanguageModel_OOV = -0.48362 +     PhraseModel_0 = -0.81262 +     PhraseModel_1 = +0.44273 +     PhraseModel_2 = +1.1733 +     PhraseModel_3 = -0.1826 +     PhraseModel_4 = -1.2213 +     PhraseModel_5 = +0.02435 +     PhraseModel_6 = -0.18823 +       PassThrough = -0.51378          --- -       1best avg score: 0.12501 (+0.12501) - 1best avg model score: -62.128 (-62.128) -           avg # pairs: 979 -        avg # rank err: 539 -     avg # margin viol: 440 -       k-best loss imp: 100% -    non0 feature count: 12 +       1best avg score: 0.12674 (+0.12674) + 1best avg model score: -7.2878 +           avg # pairs: 1769 +   non-0 feature count: 12             avg list sz: 100             avg f count: 12 -(time 0.22 min, 13 s/S) - -Writing weights file to 'work/weights.3.2' ... -done +(time 0.1 min, 6 s/S)  --- -Best iteration: 1 [SCORE 'stupid_bleu'=0.12501]. -This took 0.21667 min. +Best iteration: 1 [GOLD = 0.12674]. +This took 0.1 min. diff --git a/training/dtrain/examples/parallelized/work/shard.0.0.in b/training/dtrain/examples/parallelized/work/shard.0.0.in index d1b48321..a0ef6f54 100644 --- a/training/dtrain/examples/parallelized/work/shard.0.0.in +++ b/training/dtrain/examples/parallelized/work/shard.0.0.in @@ -1,3 +1,3 @@ -<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg> ||| the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths . -<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg> ||| mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear . +<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg> ||| europe 's divided racial house +<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg> ||| a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge .  
<seg grammar="grammar/grammar.out.2.gz" id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg> ||| the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them . diff --git a/training/dtrain/examples/parallelized/work/shard.1.0.in b/training/dtrain/examples/parallelized/work/shard.1.0.in index a63f05bd..05f0273b 100644 --- a/training/dtrain/examples/parallelized/work/shard.1.0.in +++ b/training/dtrain/examples/parallelized/work/shard.1.0.in @@ -1,3 +1,3 @@ -<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg> ||| an aging population at home and ever more open borders imply increasing racial fragmentation in european countries . -<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg> ||| this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us .  <seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg> ||| while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon . +<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg> ||| an aging population at home and ever more open borders imply increasing racial fragmentation in european countries . +<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg> ||| mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear . diff --git a/training/dtrain/examples/parallelized/work/shard.2.0.in b/training/dtrain/examples/parallelized/work/shard.2.0.in index fe542b40..0528d357 100644 --- a/training/dtrain/examples/parallelized/work/shard.2.0.in +++ b/training/dtrain/examples/parallelized/work/shard.2.0.in @@ -1,3 +1,3 @@ -<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg> ||| a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge . 
-<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg> ||| europe 's divided racial house  <seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg> ||| it will not , as america 's racial history clearly shows . +<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg> ||| race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes . +<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg> ||| the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths . diff --git a/training/dtrain/examples/parallelized/work/shard.3.0.in b/training/dtrain/examples/parallelized/work/shard.3.0.in index 4a8fa5b1..f7cbb3e3 100644 --- a/training/dtrain/examples/parallelized/work/shard.3.0.in +++ b/training/dtrain/examples/parallelized/work/shard.3.0.in @@ -1 +1 @@ -<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg> ||| race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes . +<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg> ||| this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us . 
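Each line in these shard files is one training instance in dtrain's bitext format: an SGML <seg> element carrying the per-sentence grammar archive and segment id around the source sentence, followed by ' ||| ' and one or more reference translations. A minimal Ruby sketch of how such a line decomposes — the helper name and returned keys are illustrative only, not dtrain's actual input parser:

  # Illustrative only: pull one shard line apart into its fields.
  def parse_shard_line line
    seg, rest = line.strip.split(' ||| ', 2)
    { :grammar => seg[/grammar="([^"]+)"/, 1],  # e.g. grammar/grammar.out.0.gz
      :id      => seg[/ id="([^"]+)"/, 1].to_i,
      :source  => seg[/>(.*)<\/seg>/, 1],
      :refs    => rest.split(' ||| ') }         # one or more references
  end
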
diff --git a/training/dtrain/examples/parallelized/work/weights.0 b/training/dtrain/examples/parallelized/work/weights.0 index c560fdbd..816269cd 100644 --- a/training/dtrain/examples/parallelized/work/weights.0 +++ b/training/dtrain/examples/parallelized/work/weights.0 @@ -1,12 +1,12 @@ -PhraseModel_4	-0.6990170657294328 -LanguageModel	0.891784887346263 -PhraseModel_0	-0.2107507586515428 -PhraseModel_1	-0.15442709655871997 -PhraseModel_3	-0.07262514338204715 -PhraseModel_6	-0.10965000000000148 -Glue	-0.03644999999999783 -WordPenalty	0.13204723722268177 -PassThrough	-0.09437500000000089 -LanguageModel_OOV	-0.036775000000000564 -PhraseModel_2	0.025521702385571707 -PhraseModel_5	0.006999999999999977 +LanguageModel	1.200704259340465 +PhraseModel_4	-1.2434381298299035 +PhraseModel_1	0.050697726409824076 +PhraseModel_0	-0.516923312932941 +PhraseModel_2	0.5051987092783867 +PhraseModel_3	0.20955092377784057 +PassThrough	-0.32285 +LanguageModel_OOV	-0.29269999999999996 +PhraseModel_6	-0.158425 +Glue	-0.05817500000000002 +WordPenalty	0.12758486142112804 +PhraseModel_5	0.02435 diff --git a/training/dtrain/examples/parallelized/work/weights.0.0 b/training/dtrain/examples/parallelized/work/weights.0.0 index 91eedc7b..be386c62 100644 --- a/training/dtrain/examples/parallelized/work/weights.0.0 +++ b/training/dtrain/examples/parallelized/work/weights.0.0 @@ -1,12 +1,11 @@ -PassThrough	-0.082000000000001058 -Glue	0.25700000000000267 -LanguageModel_OOV	-0.046000000000000034 -LanguageModel	0.67341721152744249 -PhraseModel_6	0.18290000000000028 -PhraseModel_5	0.0039999999999999975 -PhraseModel_4	0.47916377173928498 -PhraseModel_3	0.65577926367715722 -PhraseModel_2	0.00060731048591637909 -PhraseModel_0	0.25329462707903372 -WordPenalty	0.026926257878001431 -PhraseModel_1	0.20035945197369062 +WordPenalty -0.017632355965271129 +LanguageModel 0.72957628464102753 +LanguageModel_OOV -0.23499999999999999 +PhraseModel_0 -0.43720953659541578 +PhraseModel_1 1.0100170838129212 +PhraseModel_2 1.3524984123857073 +PhraseModel_3 -0.25541132249775761 +PhraseModel_4 -0.78115161368856911 +PhraseModel_6 -0.36810000000000004 +Glue 0.34040000000000004 +PassThrough -0.33040000000000003 diff --git a/training/dtrain/examples/parallelized/work/weights.0.1 b/training/dtrain/examples/parallelized/work/weights.0.1 index 6fcc9999..d4c77d07 100644 --- a/training/dtrain/examples/parallelized/work/weights.0.1 +++ b/training/dtrain/examples/parallelized/work/weights.0.1 @@ -1,12 +1,12 @@ -PassThrough	-0.2346750000000028 -Glue	-0.17904999999999763 -WordPenalty	0.062125825636256168 -LanguageModel	0.66824625053667575 -LanguageModel_OOV	-0.15247500000000355 -PhraseModel_0	-0.5581144363944085 -PhraseModel_1	0.12740874153205478 -PhraseModel_2	0.6038779278708799 -PhraseModel_3	-0.44463820299544454 -PhraseModel_4	-0.63136538282212662 -PhraseModel_5	-0.0084000000000000324 -PhraseModel_6	-0.20164999999999911 +WordPenalty 0.12966947493426365 +LanguageModel 0.3989224621154368 +LanguageModel_OOV -0.63139999999999996 +PhraseModel_0 -0.63991953012355962 +PhraseModel_1 0.74197897612368646 +PhraseModel_2 1.3096163833051435 +PhraseModel_3 -0.12160001974680773 +PhraseModel_4 -1.2274031286515816 +PhraseModel_5 0.02435 +PhraseModel_6 -0.210925 +Glue -0.40907500000000002 +PassThrough -0.66155000000000008 diff --git a/training/dtrain/examples/parallelized/work/weights.0.2 b/training/dtrain/examples/parallelized/work/weights.0.2 index 5668915d..8ce1449b 100644 --- a/training/dtrain/examples/parallelized/work/weights.0.2 +++ 
b/training/dtrain/examples/parallelized/work/weights.0.2 @@ -1,12 +1,12 @@ -PassThrough	-0.38122499999999337 -Glue	-0.019274999999998679 -WordPenalty	0.022192448025253487 -LanguageModel	0.4068780855136106 -LanguageModel_OOV	-0.363974999999992 -PhraseModel_0	-0.36273429313029715 -PhraseModel_1	0.56431752511029298 -PhraseModel_2	0.85638010019687694 -PhraseModel_3	-0.20222345248738063 -PhraseModel_4	-0.48295466434310252 -PhraseModel_5	0.031450000000000339 -PhraseModel_6	-0.26092499999998625 +WordPenalty 0.10319922626226019 +LanguageModel 0.6647396869692952 +LanguageModel_OOV -0.622525 +PhraseModel_0 -0.59993441316076157 +PhraseModel_1 0.78991513935858193 +PhraseModel_2 1.3148638774685031 +PhraseModel_3 0.2143393571820455 +PhraseModel_4 -1.0173894637028262 +PhraseModel_5 0.02435 +PhraseModel_6 -0.18452499999999999 +Glue -0.44422499999999998 +PassThrough -0.65267500000000012 diff --git a/training/dtrain/examples/parallelized/work/weights.1 b/training/dtrain/examples/parallelized/work/weights.1 index f52e07b8..2a00be2e 100644 --- a/training/dtrain/examples/parallelized/work/weights.1 +++ b/training/dtrain/examples/parallelized/work/weights.1 @@ -1,12 +1,12 @@ -LanguageModel	0.7527067666152598 -PhraseModel_4	-0.6467221787583058 -PhraseModel_2	0.36889175522051015 -PhraseModel_0	-0.38227173053779245 -PhraseModel_3	-0.2252732111174934 -LanguageModel_OOV	-0.25227499999999975 -PassThrough	-0.2695250000000011 -PhraseModel_1	0.03521067244127414 -Glue	-0.1579749999999981 -PhraseModel_6	-0.11932500000000047 -WordPenalty	0.0650573133891042 -PhraseModel_5	0.03475000000000043 +PhraseModel_4	-1.1379250444170055 +PhraseModel_2	1.0578050661336098 +LanguageModel	0.9343385461706668 +PhraseModel_0	-0.6917392152965985 +PhraseModel_1	0.4508371141128957 +PassThrough	-0.4411750000000001 +Glue	-0.265425 +LanguageModel_OOV	-0.411025 +PhraseModel_3	-0.186390082624459 +PhraseModel_6	-0.188225 +WordPenalty	0.09781397468665984 +PhraseModel_5	0.02435 diff --git a/training/dtrain/examples/parallelized/work/weights.1.0 b/training/dtrain/examples/parallelized/work/weights.1.0 index 31e08d81..cdcf959e 100644 --- a/training/dtrain/examples/parallelized/work/weights.1.0 +++ b/training/dtrain/examples/parallelized/work/weights.1.0 @@ -1,11 +1,12 @@ -LanguageModel_OOV	-0.044300000000000235 -PassThrough	-0.19300000000000087 -PhraseModel_6	-0.18180000000000701 -LanguageModel	1.3644969337716422 -PhraseModel_3	0.017250706134911725 -PhraseModel_4	-1.5473728273858063 -Glue	-0.32289999999999447 -PhraseModel_1	-0.077697953502182365 -WordPenalty	0.27968564634568688 -PhraseModel_0	-0.19048660891012237 -PhraseModel_2	0.05889844333199834 +WordPenalty 0.05433023968609621 +LanguageModel 0.69947965605855011 +LanguageModel_OOV -0.2641 +PhraseModel_0 -1.4207505705360111 +PhraseModel_1 -1.563047680441811 +PhraseModel_2 -0.21050528366541305 +PhraseModel_3 -0.17764037275860439 +PhraseModel_4 -1.6583462458159566 +PhraseModel_5 0.079399999999999998 +PhraseModel_6 0.15280000000000002 +Glue -0.27220000000000005 +PassThrough -0.23670000000000002 diff --git a/training/dtrain/examples/parallelized/work/weights.1.1 b/training/dtrain/examples/parallelized/work/weights.1.1 index 544ff462..c1bb2cf0 100644 --- a/training/dtrain/examples/parallelized/work/weights.1.1 +++ b/training/dtrain/examples/parallelized/work/weights.1.1 @@ -1,12 +1,12 @@ -PassThrough	-0.42167499999999858 -Glue	-0.26424999999999721 -WordPenalty	0.04788096662983269 -LanguageModel	0.78495517342352483 -LanguageModel_OOV	-0.49307499999999477 -PhraseModel_0	-0.58703462849498356 -PhraseModel_1	
-0.33425278954714266 -PhraseModel_2	0.20834221229630179 -PhraseModel_3	-0.043345645640208569 -PhraseModel_4	-0.60760531115816907 -PhraseModel_5	0.12300000000000186 -PhraseModel_6	-0.054150000000001031 +WordPenalty -0.0091744709302067785 +LanguageModel 0.79433413663506514 +LanguageModel_OOV -0.43090000000000001 +PhraseModel_0 -0.56242499947237046 +PhraseModel_1 0.85362516703032698 +PhraseModel_2 1.3457900890481096 +PhraseModel_3 -0.13095079554478939 +PhraseModel_4 -0.94761908497413061 +PhraseModel_5 0.02435 +PhraseModel_6 -0.160025 +Glue -0.20487500000000003 +PassThrough -0.46105000000000007 diff --git a/training/dtrain/examples/parallelized/work/weights.1.2 b/training/dtrain/examples/parallelized/work/weights.1.2 index ac3284b9..c9598a04 100644 --- a/training/dtrain/examples/parallelized/work/weights.1.2 +++ b/training/dtrain/examples/parallelized/work/weights.1.2 @@ -1,12 +1,12 @@ -PassThrough	-0.34762500000000224 -Glue	-0.23607500000000026 -WordPenalty	0.10931192109504413 -LanguageModel	0.81339027211983694 -LanguageModel_OOV	-0.33237500000000098 -PhraseModel_0	-0.53685104648974269 -PhraseModel_1	-0.049657790506137042 -PhraseModel_2	0.40277066454544108 -PhraseModel_3	0.14600791389785803 -PhraseModel_4	-0.72850673041349101 -PhraseModel_5	0.034750000000000433 -PhraseModel_6	-0.27192499999999448 +WordPenalty 0.076359827280638559 +LanguageModel 1.3183380272921175 +LanguageModel_OOV -0.60902499999999993 +PhraseModel_0 -0.2248075206657828 +PhraseModel_1 0.86368802571834491 +PhraseModel_2 1.0746702462261808 +PhraseModel_3 0.18002263643876637 +PhraseModel_4 -0.84660750337519441 +PhraseModel_5 0.02435 +PhraseModel_6 0.11247499999999999 +Glue -0.49852500000000005 +PassThrough -0.63917500000000005 diff --git a/training/dtrain/examples/parallelized/work/weights.2 b/training/dtrain/examples/parallelized/work/weights.2 index dedaf165..310973ec 100644 --- a/training/dtrain/examples/parallelized/work/weights.2 +++ b/training/dtrain/examples/parallelized/work/weights.2 @@ -1,12 +1,12 @@ -PhraseModel_2	0.6558266927225778 -PhraseModel_4	-0.6161090299356294 -LanguageModel	0.5690697644415413 -PhraseModel_1	0.32098232482479416 -PhraseModel_0	-0.39422813904895143 -PassThrough	-0.37879999999999764 -LanguageModel_OOV	-0.3620499999999963 -Glue	-0.1792249999999967 -PhraseModel_6	-0.18769999999999526 -PhraseModel_3	-0.10321074877850786 -WordPenalty	0.05867318450512617 -PhraseModel_5	0.03392500000000041 +PhraseModel_2	1.185520780812669 +PhraseModel_4	-1.0801541070647134 +LanguageModel	0.7741099486587568 +PhraseModel_0	-0.6265095873268189 +PhraseModel_1	0.6260421233840029 +PassThrough	-0.5630000000000002 +Glue	-0.495325 +LanguageModel_OOV	-0.53285 +PhraseModel_3	-0.008805626854390465 +PhraseModel_6	-0.10977500000000001 +WordPenalty	0.1060655698428214 +PhraseModel_5	0.026699999999999998 diff --git a/training/dtrain/examples/parallelized/work/weights.2.0 b/training/dtrain/examples/parallelized/work/weights.2.0 index f7ece54d..3e87fed4 100644 --- a/training/dtrain/examples/parallelized/work/weights.2.0 +++ b/training/dtrain/examples/parallelized/work/weights.2.0 @@ -1,11 +1,11 @@ -LanguageModel_OOV	-0.12280000000000209 -PassThrough	-0.13350000000000165 -Glue	-0.1259000000000001 -PhraseModel_1	0.24792740418949952 -WordPenalty	0.048293546387642321 -PhraseModel_0	0.26356693580129958 -PhraseModel_2	0.0063762787517740458 -PhraseModel_3	-0.18966358382769741 -PhraseModel_4	-0.22599681869670471 -PhraseModel_6	0.074299999999999047 -LanguageModel	0.3625416478537038 +WordPenalty 0.14922358398195767 +LanguageModel 
0.79685677298009394 +LanguageModel_OOV -0.66270000000000007 +PhraseModel_0 0.37998874905310187 +PhraseModel_1 0.69213063228111271 +PhraseModel_2 0.34219807728516061 +PhraseModel_3 1.1425846772648622 +PhraseModel_4 -0.55412548521619742 +PhraseModel_6 0.067599999999999993 +Glue -0.21090000000000003 +PassThrough -0.63429999999999997 diff --git a/training/dtrain/examples/parallelized/work/weights.2.1 b/training/dtrain/examples/parallelized/work/weights.2.1 index 0946609d..d129dc49 100644 --- a/training/dtrain/examples/parallelized/work/weights.2.1 +++ b/training/dtrain/examples/parallelized/work/weights.2.1 @@ -1,12 +1,12 @@ -PassThrough	-0.23927500000000015 -Glue	-0.10384999999999919 -WordPenalty	0.038717353061671053 -LanguageModel	0.49412782572695274 -LanguageModel_OOV	-0.24887499999999915 -PhraseModel_0	-0.32101572713801541 -PhraseModel_1	0.34413149733472631 -PhraseModel_2	0.62365535622061474 -PhraseModel_3	-0.49337445280658987 -PhraseModel_4	-0.77004673375347765 -PhraseModel_5	0.0069999999999999767 -PhraseModel_6	-0.05055000000000108 +WordPenalty 0.1593752174964457 +LanguageModel 1.5897162231676281 +LanguageModel_OOV -0.52100000000000002 +PhraseModel_0 -0.5834836741748588 +PhraseModel_1 0.29827543837280185 +PhraseModel_2 0.78493316593562568 +PhraseModel_3 0.083221832554333464 +PhraseModel_4 -0.93843312963279457 +PhraseModel_5 0.02435 +PhraseModel_6 -0.27382499999999999 +Glue -0.76607500000000006 +PassThrough -0.55115000000000003 diff --git a/training/dtrain/examples/parallelized/work/weights.2.2 b/training/dtrain/examples/parallelized/work/weights.2.2 index b766fc75..bcc83b44 100644 --- a/training/dtrain/examples/parallelized/work/weights.2.2 +++ b/training/dtrain/examples/parallelized/work/weights.2.2 @@ -1,12 +1,12 @@ -PassThrough	-0.46082499999999499 -Glue	-0.32907499999998979 -WordPenalty	0.049596429833348527 -LanguageModel	0.33496341201347335 -LanguageModel_OOV	-0.44357499999999361 -PhraseModel_0	-0.30679883980783829 -PhraseModel_1	0.5937585900939707 -PhraseModel_2	0.86415970329021152 -PhraseModel_3	-0.21072279838022553 -PhraseModel_4	-0.65734339854224544 -PhraseModel_5	0.034750000000000433 -PhraseModel_6	-0.10652500000000011 +WordPenalty 0.10819361280414735 +LanguageModel 0.52389743342585859 +LanguageModel_OOV -0.41622500000000001 +PhraseModel_0 -0.86867995703334211 +PhraseModel_1 0.40783818771767943 +PhraseModel_2 1.1792706530114188 +PhraseModel_3 -0.2469805689928464 +PhraseModel_4 -1.2352895858909159 +PhraseModel_5 0.033750000000000002 +PhraseModel_6 -0.17882500000000001 +Glue -0.90862500000000002 +PassThrough -0.44637500000000013 diff --git a/training/dtrain/examples/parallelized/work/weights.3.0 b/training/dtrain/examples/parallelized/work/weights.3.0 index 403ffbb3..e3586048 100644 --- a/training/dtrain/examples/parallelized/work/weights.3.0 +++ b/training/dtrain/examples/parallelized/work/weights.3.0 @@ -1,12 +1,12 @@ -PhraseModel_4	-1.501862388574505 -PhraseModel_3	-0.77386695951256013 -PhraseModel_6	-0.51399999999999824 -PhraseModel_5	0.02399999999999991 -LanguageModel	1.1666837562322641 -PhraseModel_2	0.036204776972598059 -PassThrough	0.030999999999999975 -Glue	0.046000000000000582 -PhraseModel_1	-0.98829728889588764 -WordPenalty	0.1732834982793964 -PhraseModel_0	-1.1693779885763822 -LanguageModel_OOV	0.066000000000000086 +WordPenalty 0.32441797798172944 +LanguageModel 2.5769043236821889 +LanguageModel_OOV -0.0090000000000000011 +PhraseModel_0 -0.58972189365343919 +PhraseModel_1 0.063690869987073351 +PhraseModel_2 0.53660363110809217 +PhraseModel_3 0.12867071310286207 
+PhraseModel_4 -1.9801291745988916 +PhraseModel_5 0.018000000000000002 +PhraseModel_6 -0.48600000000000004 +Glue -0.090000000000000011 +PassThrough -0.090000000000000011 diff --git a/training/dtrain/examples/parallelized/work/weights.3.1 b/training/dtrain/examples/parallelized/work/weights.3.1 index c171d586..b27687d3 100644 --- a/training/dtrain/examples/parallelized/work/weights.3.1 +++ b/training/dtrain/examples/parallelized/work/weights.3.1 @@ -1,12 +1,12 @@ -PassThrough	-0.18247500000000313 -Glue	-0.084749999999998368 -WordPenalty	0.11150510822865688 -LanguageModel	1.063497816773886 -LanguageModel_OOV	-0.1146750000000015 -PhraseModel_0	-0.062922130123762257 -PhraseModel_1	0.0035552404454581212 -PhraseModel_2	0.039691524494244249 -PhraseModel_3	0.080265456972269417 -PhraseModel_4	-0.57787128729945014 -PhraseModel_5	0.017399999999999922 -PhraseModel_6	-0.17095000000000066 +WordPenalty 0.11138567724613679 +LanguageModel 0.95438136276453733 +LanguageModel_OOV -0.060799999999999937 +PhraseModel_0 -0.98112865741560529 +PhraseModel_1 -0.090531125075232435 +PhraseModel_2 0.79088062624556033 +PhraseModel_3 -0.57623134776057228 +PhraseModel_4 -1.4382448344095151 +PhraseModel_5 0.02435 +PhraseModel_6 -0.108125 +Glue 0.31832499999999997 +PassThrough -0.090950000000000003 diff --git a/training/dtrain/examples/parallelized/work/weights.3.2 b/training/dtrain/examples/parallelized/work/weights.3.2 index 3ff0411d..ccb591a2 100644 --- a/training/dtrain/examples/parallelized/work/weights.3.2 +++ b/training/dtrain/examples/parallelized/work/weights.3.2 @@ -1,12 +1,12 @@ -PassThrough	-0.32552500000000006 -Glue	-0.13247499999999815 -WordPenalty	0.053591939066858545 -LanguageModel	0.72104728811924446 -LanguageModel_OOV	-0.30827499999999869 -PhraseModel_0	-0.37052837676792744 -PhraseModel_1	0.17551097460105014 -PhraseModel_2	0.49999630285778179 -PhraseModel_3	-0.14590465814428336 -PhraseModel_4	-0.59563132644367889 -PhraseModel_5	0.034750000000000433 -PhraseModel_6	-0.11142500000000025 +WordPenalty 0.13650961302423945 +LanguageModel 0.58946464694775647 +LanguageModel_OOV -0.48362499999999997 +PhraseModel_0 -0.81261645844738917 +PhraseModel_1 0.44272714074140529 +PhraseModel_2 1.1732783465445731 +PhraseModel_3 -0.18260393204552733 +PhraseModel_4 -1.2213298752899167 +PhraseModel_5 0.02435 +PhraseModel_6 -0.188225 +Glue -0.12992500000000001 +PassThrough -0.51377500000000009 diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini index a515db02..f2698007 100644 --- a/training/dtrain/examples/standard/dtrain.ini +++ b/training/dtrain/examples/standard/dtrain.ini @@ -1,27 +1,10 @@ -#input=./nc-wmt11.de.gz -#refs=./nc-wmt11.en.gz -bitext=./nc-wmt11.gz +bitext=./nc-wmt11.gz      # input bitext  output=-                  # a weights file (add .gz for gzip compression) or STDOUT '-' -select_weights=avg        # output average (over epochs) weight vector  decoder_config=./cdec.ini # config for cdec -# weights for these features will be printed on each iteration +iterations=3              # run over input 3 times +k=100                     # use 100best lists +N=4                       # optimize (approx.) 
BLEU4 +learning_rate=0.1         # learning rate +error_margin=1.0          # margin for margin perceptron  print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough -# newer version of the grammar extractor use different feature names:  -#print_weights= EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV -stop_after=10 # stop epoch after 10 inputs -# interesting stuff -epochs=3                 # run over input 3 times -k=100                    # use 100best lists -N=4                      # optimize (approx) BLEU4 -scorer=fixed_stupid_bleu # use 'stupid' BLEU+1 -learning_rate=0.1        # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron) -gamma=0                  # use SVM reg -sample_from=kbest        # use kbest lists (as opposed to forest) -filter=uniq              # only unique entries in kbest (surface form) -pair_sampling=XYX        # -hi_lo=0.1                # 10 vs 80 vs 10 and 80 vs 10 here -pair_threshold=0         # minimum distance in BLEU (here: > 0) -loss_margin=0            # update if correctly ranked, but within this margin -repeat=1                 # repeat training on a kbest list 1 times  -#batch=true              # batch tuning, update after accumulating over all sentences and all kbest lists diff --git a/training/dtrain/examples/toy/dtrain.ini b/training/dtrain/examples/toy/dtrain.ini index 70c7331c..378224b8 100644 --- a/training/dtrain/examples/toy/dtrain.ini +++ b/training/dtrain/examples/toy/dtrain.ini @@ -1,12 +1,8 @@  decoder_config=cdec.ini  bitext=in -output=- -print_weights=logp shell_rule house_rule small_rule little_rule PassThrough PassThrough_1 PassThrough_2 PassThrough_3 PassThrough_4 PassThrough_5 PassThrough_6 +output=weights  k=4  N=4 -epochs=2 -scorer=bleu -sample_from=kbest -filter=uniq -pair_sampling=all +iterations=2  learning_rate=1 +print_weights=logp shell_rule house_rule small_rule little_rule PassThrough PassThrough_1 PassThrough_2 PassThrough_3 PassThrough_4 PassThrough_5 PassThrough_6 diff --git a/training/dtrain/examples/toy/expected-output b/training/dtrain/examples/toy/expected-output index 3c3a5a18..8c758d00 100644 --- a/training/dtrain/examples/toy/expected-output +++ b/training/dtrain/examples/toy/expected-output @@ -1,40 +1,24 @@ -Warning: hi_lo only works with pair_sampling XYX. -                cdec cfg 'cdec.ini' -Seeding random number sequence to 3644621239 -  dtrain  Parameters:                         k 4                         N 4                         T 2 -                   batch 0 -                  scorer 'bleu' -             sample from 'kbest' -                  filter 'uniq'             learning rate 1 -                   gamma 0 -             loss margin 0 -       faster perceptron 1 -                   pairs 'all' -          pair threshold 0 -          select weights 'last' -                  l1 reg 0 'none' -                    pclr no -               max pairs 4294967295 -                  repeat 1 -                cdec cfg 'cdec.ini' -                   input '' -                  output '-' -(a dot represents 10 inputs) +            error margin 0 +                  l1 reg 0 +            decoder conf 'cdec.ini' +                   input 'in' +                  output 'weights' +(a dot per input)  Iteration #1 of 2. -  2 + ... 
2  WEIGHTS                logp = +0 -        shell_rule = -1 -        house_rule = +2 -        small_rule = -2 +        shell_rule = +0 +        house_rule = +3 +        small_rule = +0         little_rule = +3 -       PassThrough = -5 +       PassThrough = -15       PassThrough_1 = +0       PassThrough_2 = +0       PassThrough_3 = +0 @@ -42,26 +26,23 @@ WEIGHTS       PassThrough_5 = +0       PassThrough_6 = +0          --- -       1best avg score: 0.5 (+0.5) - 1best avg model score: 2.5 (+2.5) -           avg # pairs: 1.5 -        avg # rank err: 1.5 (meaningless) -     avg # margin viol: 0 -       k-best loss imp: 100% -    non0 feature count: 6 +       1best avg score: 0.40937 (+0.40937) + 1best avg model score: 3 +           avg # pairs: 2.5 +   non-0 feature count: 4             avg list sz: 4             avg f count: 2.875  (time 0 min, 0 s/S)  Iteration #2 of 2. -  2 + ... 2  WEIGHTS                logp = +0 -        shell_rule = -1 -        house_rule = +2 -        small_rule = -2 +        shell_rule = +0 +        house_rule = +3 +        small_rule = +0         little_rule = +3 -       PassThrough = -5 +       PassThrough = -15       PassThrough_1 = +0       PassThrough_2 = +0       PassThrough_3 = +0 @@ -69,26 +50,14 @@ WEIGHTS       PassThrough_5 = +0       PassThrough_6 = +0          --- -       1best avg score: 1 (+0.5) - 1best avg model score: 5 (+2.5) +       1best avg score: 0.81873 (+0.40937) + 1best avg model score: 6             avg # pairs: 0 -        avg # rank err: 0 (meaningless) -     avg # margin viol: 0 -       k-best loss imp: 100% -    non0 feature count: 6 +   non-0 feature count: 4             avg list sz: 4             avg f count: 3  (time 0 min, 0 s/S) -Writing weights file to '-' ... -house_rule	2 -little_rule	3 -Glue	-4 -PassThrough	-5 -small_rule	-2 -shell_rule	-1 -done -  --- -Best iteration: 2 [SCORE 'bleu'=1]. +Best iteration: 2 [GOLD = 0.81873].  This took 0 min. 
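The pair counts and "error margin" reported in these logs come from dtrain's pairwise update (this commit replaces pairs.h with update.h, which is not itself shown in the diff). As a rough sketch only — assuming "learning rate" and "error margin" mean what they usually do for a margin perceptron over ranked pairs — each sampled pair whose gold-better hypothesis does not beat the worse one by at least the margin in model score moves the weights toward the better hypothesis' features. The hypothesis fields and helper name below are hypothetical:

  # Hypothetical sketch, NOT the contents of update.h. Each hypothesis is
  # a Hash {:gold =>, :model =>, :f => {feature => value}}; pairs are
  # ordered so that hi has the better gold (BLEU) score than lo.
  def perceptron_update weights, pairs, eta, margin
    pairs.each do |hi, lo|
      next if hi[:model] - lo[:model] >= margin  # ranked correctly, outside margin
      (hi[:f].keys | lo[:f].keys).each do |k|
        weights[k] = weights.fetch(k, 0.0) +
                     eta * (hi[:f].fetch(k, 0.0) - lo[:f].fetch(k, 0.0))
      end
    end
    weights
  end
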
diff --git a/training/dtrain/examples/toy/weights b/training/dtrain/examples/toy/weights new file mode 100644 index 00000000..f6f32772 --- /dev/null +++ b/training/dtrain/examples/toy/weights @@ -0,0 +1,4 @@ +house_rule 3 +little_rule 3 +Glue -12 +PassThrough -15 diff --git a/training/dtrain/lplp.rb b/training/dtrain/lplp.rb index 86e835e8..a1fcd1a3 100755 --- a/training/dtrain/lplp.rb +++ b/training/dtrain/lplp.rb @@ -19,7 +19,8 @@ end  # stats  def median(feature_column, n) -  return feature_column.concat(0.step(n-feature_column.size-1).map{|i|0}).sort[feature_column.size/2] +  return feature_column.concat(0.step(n-feature_column.size-1).map{|i|0})\ +    .sort[feature_column.size/2]  end  def mean(feature_column, n) @@ -28,7 +29,7 @@ end  # selection  def select_k(weights, norm_fun, n, k=10000) -  weights.sort{|a,b| norm_fun.call(b[1], n) <=> norm_fun.call(a[1], n)}.each { |p| +  weights.sort{|a,b| norm_fun.call(b[1], n)<=>norm_fun.call(a[1], n)}.each { |p|      puts "#{p[0]}\t#{mean(p[1], n)}"      k -= 1      if k == 0 then break end diff --git a/training/dtrain/pairs.h b/training/dtrain/pairs.h deleted file mode 100644 index dea0dabc..00000000 --- a/training/dtrain/pairs.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef _DTRAIN_PAIRS_H_ -#define _DTRAIN_PAIRS_H_ - -namespace dtrain -{ - -bool -CmpHypsByScore(ScoredHyp a, ScoredHyp b) -{ -  return a.score > b.score; -} - -/* - * multipartite ranking - *  sort (descending) by bleu - *  compare top X (hi) to middle Y (med) and low X (lo) - *  cmp middle Y to low X - */ -inline void -MakePairs(vector<ScoredHyp>* s, -          vector<pair<ScoredHyp,ScoredHyp> >& training, -          bool misranked_only, -          float hi_lo) -{ -  unsigned sz = s->size(); -  if (sz < 2) return; -  sort(s->begin(), s->end(), CmpHypsByScore); -  unsigned sep = round(sz*hi_lo); -  // hi vs. med vs. low -  unsigned sep_hi = sep; -  if (sz > 4) while (sep_hi < sz && (*s)[sep_hi-1].score == (*s)[sep_hi].score) ++sep_hi; -  else sep_hi = 1; -  for (unsigned i = 0; i < sep_hi; i++) { -    for (unsigned j = sep_hi; j < sz; j++) { -      if (misranked_only && !((*s)[i].model <= (*s)[j].model)) continue; -      if ((*s)[i].score != (*s)[j].score) -        training.push_back(make_pair((*s)[i], (*s)[j])); -    } -  } -  // med vs. 
low -  unsigned sep_lo = sz-sep; -  while (sep_lo > 0 && (*s)[sep_lo-1].score == (*s)[sep_lo].score) --sep_lo; -  for (unsigned i = sep_hi; i < sep_lo; i++) { -    for (unsigned j = sep_lo; j < sz; j++) { -      if (misranked_only && !((*s)[i].model <= (*s)[j].model)) continue; -      if ((*s)[i].score != (*s)[j].score) -        training.push_back(make_pair((*s)[i], (*s)[j])); -    } -  } -} - -} // namespace - -#endif - diff --git a/training/dtrain/parallelize.rb b/training/dtrain/parallelize.rb index 9315b7f7..29f3e609 100755 --- a/training/dtrain/parallelize.rb +++ b/training/dtrain/parallelize.rb @@ -3,61 +3,52 @@  require 'trollop'  require 'zipf' -def usage -  STDERR.write "Usage: " -  STDERR.write "ruby parallelize.rb -c <dtrain.ini> [-e <epochs=10>] [--randomize/-z] [--reshard/-y] -s <#shards|0> [-p <at once=9999>] -i <input> [--qsub/-q] [--dtrain_binary <path to dtrain binary>] [-l \"l2 select_k 100000\"] [--extra_qsub \"-l mem_free=24G\"]\n" -  exit 1 +conf = Trollop::options do +  opt :config,                    "dtrain configuration",                  :type => :string +  opt :input,                     "input as bitext (f ||| e)",             :type => :string +  opt :epochs,                    "number of epochs",                      :type => :int, :default => 10 +  opt :lplp_args,                 "arguments for lplp.rb",                 :type => :string, :default => "l2 select_k 100000" +  opt :randomize,                 "randomize shards once",                 :type => :bool,   :default => false, :short => '-z' +  opt :reshard,                   "randomize after each epoch",            :type => :bool,   :default => false, :short => '-y' +  opt :shards,                    "number of shards",                      :type => :int +  opt :weights,                   "input weights for first epoch",         :type => :string, :default => '' +  opt :per_shard_decoder_configs, "give custom decoder config per shard",  :type => :string, :short => '-o' +  opt :processes_at_once,         "jobs to run at once",                   :type => :int,    :default => 9999 +  opt :qsub,                      "use qsub",                              :type => :bool,   :default => false +  opt :qsub_args,                 "extra args for qsub",                   :type => :string, :default => "-l h_vmem=5G" +  opt :dtrain_binary,             "path to dtrain binary",                 :type => :string  end -opts = Trollop::options do -  opt :config, "dtrain config file", :type => :string -  opt :epochs, "number of epochs", :type => :int, :default => 10 -  opt :lplp_args, "arguments for lplp.rb", :type => :string, :default => "l2 select_k 100000" -  opt :randomize, "randomize shards before each epoch", :type => :bool, :short => '-z', :default => false -  opt :reshard, "reshard after each epoch", :type => :bool, :short => '-y', :default => false -  opt :shards, "number of shards", :type => :int -  opt :processes_at_once, "have this number (max) running at the same time", :type => :int, :default => 9999 -  opt :input, "input (bitext f ||| e ||| ...)", :type => :string -  opt :dtrain_binary, "path to dtrain binary", :type => :string -  opt :qsub, "use qsub", :type => :bool, :default => false -  opt :qsub_args, "extra args for qsub", :type => :string, :default => "-l h_vmem=5G" -  opt :first_input_weights, "input weights for first iter", :type => :string, :default => '', :short => '-w' -  opt :per_shard_decoder_configs, "give special decoder config per shard", :type => :string, :short => '-o' -end -usage if not 
opts[:config]&&opts[:shards]&&opts[:input] -  dtrain_dir = File.expand_path File.dirname(__FILE__) -if not opts[:dtrain_binary] +if not conf[:dtrain_binary]    dtrain_bin = "#{dtrain_dir}/dtrain"  else -  dtrain_bin = opts[:dtrain_binary] +  dtrain_bin = conf[:dtrain_binary]  end -ruby       = '/usr/bin/ruby'  lplp_rb    = "#{dtrain_dir}/lplp.rb" -lplp_args  = opts[:lplp_args] -cat        = '/bin/cat' +lplp_args  = conf[:lplp_args] -ini        = opts[:config] -epochs     = opts[:epochs] -rand       = opts[:randomize] -reshard    = opts[:reshard] -predefined_shards = false +dtrain_conf       = conf[:config] +epochs            = conf[:epochs] +rand              = conf[:randomize] +reshard           = conf[:reshard] +predefined_shards         = false  per_shard_decoder_configs = false -if opts[:shards] == 0 +if conf[:shards] == 0    predefined_shards = true    num_shards = 0 -  per_shard_decoder_configs = true if opts[:per_shard_decoder_configs] +  per_shard_decoder_configs = true if conf[:per_shard_decoder_configs]  else -  num_shards = opts[:shards] +  num_shards = conf[:shards]  end -input = opts[:input] -use_qsub       = opts[:qsub] -shards_at_once = opts[:processes_at_once] -first_input_weights  = opts[:first_input_weights] +input               = conf[:input] +use_qsub            = conf[:qsub] +shards_at_once      = conf[:processes_at_once] +first_input_weights = conf[:weights]  `mkdir work` -def make_shards(input, num_shards, epoch, rand) +def make_shards input, num_shards, epoch, rand    lc = `wc -l #{input}`.split.first.to_i    index = (0..lc-1).to_a    index.reverse! @@ -97,7 +88,8 @@ input_files = []  if predefined_shards    input_files = File.new(input).readlines.map {|i| i.strip }    if per_shard_decoder_configs -    decoder_configs = File.new(opts[:per_shard_decoder_configs]).readlines.map {|i| i.strip} +    decoder_configs = ReadFile.readlines_strip(conf[:per_shard_decoder_configs] +                                              ).map { |i| i.strip }    end    num_shards = input_files.size  else @@ -118,22 +110,29 @@ end        qsub_str_start = qsub_str_end = ''        local_end = ''        if use_qsub -        qsub_str_start = "qsub #{opts[:qsub_args]} -cwd -sync y -b y -j y -o work/out.#{shard}.#{epoch} -N dtrain.#{shard}.#{epoch} \"" +        qsub_str_start = "qsub #{conf[:qsub_args]} -cwd -sync y -b y -j y\ +                           -o work/out.#{shard}.#{epoch}\ +                           -N dtrain.#{shard}.#{epoch} \""          qsub_str_end = "\""          local_end = ''        else          local_end = "2>work/out.#{shard}.#{epoch}"        end        if per_shard_decoder_configs -        cdec_cfg = "--decoder_config #{decoder_configs[shard]}" +        cdec_conf = "--decoder_config #{decoder_configs[shard]}"        else -        cdec_cfg = "" +        cdec_conf = ""        end        if first_input_weights!='' && epoch == 0          input_weights = "--input_weights #{first_input_weights}"        end        pids << Kernel.fork { -        `#{qsub_str_start}#{dtrain_bin} -c #{ini} #{cdec_cfg} #{input_weights}\ +        puts         "#{qsub_str_start}#{dtrain_bin} -c #{dtrain_conf} #{cdec_conf}\ +          #{input_weights}\ +          --bitext #{input_files[shard]}\ +          --output work/weights.#{shard}.#{epoch}#{qsub_str_end} #{local_end}" +        `#{qsub_str_start}#{dtrain_bin} -c #{dtrain_conf} #{cdec_conf}\ +          #{input_weights}\            --bitext #{input_files[shard]}\            --output work/weights.#{shard}.#{epoch}#{qsub_str_end} #{local_end}`        } 
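The loop above forks one dtrain call per shard, and the hunk that follows waits on the batch before folding the per-shard weights together with lplp.rb. Stripped of the qsub and config plumbing, the throttling pattern is plain Ruby process control; a condensed sketch with an illustrative helper name:

  # Sketch of the throttled fork pattern used by parallelize.rb: run at
  # most at_once child processes at a time, then wait for the batch.
  def run_throttled cmds, at_once
    pids = []
    cmds.each do |cmd|
      pids << Kernel.fork { `#{cmd}` }
      if pids.size >= at_once
        pids.each { |pid| Process.wait(pid) }
        pids.clear
      end
    end
    pids.each { |pid| Process.wait(pid) }
  end
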
@@ -144,8 +143,9 @@ end      pids.each { |pid| Process.wait(pid) }      pids.clear    end -  `#{cat} work/weights.*.#{epoch} > work/weights_cat` -  `#{ruby} #{lplp_rb} #{lplp_args} #{num_shards} < work/weights_cat > work/weights.#{epoch}` +  `cat work/weights.*.#{epoch} > work/weights_cat` +  `ruby #{lplp_rb} #{lplp_args} #{num_shards} < work/weights_cat\ +                                                 > work/weights.#{epoch}`    if rand and reshard and epoch+1!=epochs      input_files, num_shards = make_shards input, num_shards, epoch+1, rand    end diff --git a/training/dtrain/sample.h b/training/dtrain/sample.h index 64d93cb0..c3586c58 100644 --- a/training/dtrain/sample.h +++ b/training/dtrain/sample.h @@ -9,14 +9,16 @@ namespace dtrain  struct ScoredKbest : public DecoderObserver  { -  const unsigned k_; +  const size_t k_;    vector<ScoredHyp> s_; -  unsigned src_len_; +  size_t src_len_;    PerSentenceBleuScorer* scorer_;    vector<vector<WordID> >* refs_; -  unsigned f_count_, sz_; +  vector<Ngrams>* ref_ngs_; +  vector<size_t>* ref_ls_; +  size_t f_count_, sz_; -  ScoredKbest(const unsigned k, PerSentenceBleuScorer* scorer) : +  ScoredKbest(const size_t k, PerSentenceBleuScorer* scorer) :      k_(k), scorer_(scorer) {}    virtual void @@ -26,7 +28,7 @@ struct ScoredKbest : public DecoderObserver      s_.clear(); sz_ = f_count_ = 0;      KBest::KBestDerivations<vector<WordID>, ESentenceTraversal,        KBest::FilterUnique, prob_t, EdgeProb> kbest(*hg, k_); -    for (unsigned i = 0; i < k_; ++i) { +    for (size_t i = 0; i < k_; ++i) {        const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal, KBest::FilterUnique,                prob_t, EdgeProb>::Derivation* d =              kbest.LazyKthBest(hg->nodes_.size() - 1, i); @@ -36,7 +38,7 @@ struct ScoredKbest : public DecoderObserver        h.f = d->feature_values;        h.model = log(d->score);        h.rank = i; -      h.score = scorer_->Score(h.w, *refs_); +      h.gold = scorer_->Score(h.w, *ref_ngs_, *ref_ls_);        s_.push_back(h);        sz_++;        f_count_ += h.f.size(); @@ -44,9 +46,13 @@ struct ScoredKbest : public DecoderObserver    }    vector<ScoredHyp>* GetSamples() { return &s_; } -  inline void SetReference(vector<vector<WordID> >& refs) { refs_ = &refs; } -  inline unsigned GetFeatureCount() { return f_count_; } -  inline unsigned GetSize() { return sz_; } +  inline void SetReference(vector<Ngrams>& ngs, vector<size_t>& ls) +  { +    ref_ngs_ = &ngs; +    ref_ls_ = &ls; +  } +  inline size_t GetFeatureCount() { return f_count_; } +  inline size_t GetSize() { return sz_; }  }; diff --git a/training/dtrain/score.h b/training/dtrain/score.h index c727dd30..d51aef82 100644 --- a/training/dtrain/score.h +++ b/training/dtrain/score.h @@ -8,17 +8,17 @@ namespace dtrain  struct NgramCounts  { -  unsigned N_; -  map<unsigned, score_t> clipped_; -  map<unsigned, score_t> sum_; +  size_t N_; +  map<size_t, weight_t> clipped_; +  map<size_t, weight_t> sum_; -  NgramCounts(const unsigned N) : N_(N) { Zero(); } +  NgramCounts(const size_t N) : N_(N) { Zero(); }    inline void    operator+=(const NgramCounts& rhs)    {      if (rhs.N_ > N_) Resize(rhs.N_); -    for (unsigned i = 0; i < N_; i++) { +    for (size_t i = 0; i < N_; i++) {        this->clipped_[i] += rhs.clipped_.find(i)->second;        this->sum_[i] += rhs.sum_.find(i)->second;      } @@ -34,16 +34,16 @@ struct NgramCounts    }    inline void -  operator*=(const score_t rhs) +  operator*=(const weight_t rhs)    { -    for (unsigned i = 0; i < N_; 
diff --git a/training/dtrain/score.h b/training/dtrain/score.h
index c727dd30..d51aef82 100644
--- a/training/dtrain/score.h
+++ b/training/dtrain/score.h
@@ -8,17 +8,17 @@ namespace dtrain
 
 struct NgramCounts
 {
-  unsigned N_;
-  map<unsigned, score_t> clipped_;
-  map<unsigned, score_t> sum_;
+  size_t N_;
+  map<size_t, weight_t> clipped_;
+  map<size_t, weight_t> sum_;
 
-  NgramCounts(const unsigned N) : N_(N) { Zero(); }
+  NgramCounts(const size_t N) : N_(N) { Zero(); }
 
   inline void
   operator+=(const NgramCounts& rhs)
   {
     if (rhs.N_ > N_) Resize(rhs.N_);
-    for (unsigned i = 0; i < N_; i++) {
+    for (size_t i = 0; i < N_; i++) {
       this->clipped_[i] += rhs.clipped_.find(i)->second;
       this->sum_[i] += rhs.sum_.find(i)->second;
     }
@@ -34,16 +34,16 @@ struct NgramCounts
   }
 
   inline void
-  operator*=(const score_t rhs)
+  operator*=(const weight_t rhs)
   {
-    for (unsigned i = 0; i < N_; i++) {
+    for (size_t i = 0; i < N_; i++) {
       this->clipped_[i] *= rhs;
       this->sum_[i] *= rhs;
     }
   }
 
   inline void
-  Add(const unsigned count, const unsigned ref_count, const unsigned i)
+  Add(const size_t count, const size_t ref_count, const size_t i)
   {
     assert(i < N_);
     if (count > ref_count) {
@@ -57,40 +57,31 @@ struct NgramCounts
   inline void
   Zero()
   {
-    for (unsigned i = 0; i < N_; i++) {
+    for (size_t i = 0; i < N_; i++) {
       clipped_[i] = 0.;
       sum_[i] = 0.;
     }
   }
 
   inline void
-  One()
+  Print(ostream& os=cerr)
   {
-    for (unsigned i = 0; i < N_; i++) {
-      clipped_[i] = 1.;
-      sum_[i] = 1.;
+    for (size_t i = 0; i < N_; i++) {
+      os << i+1 << "grams (clipped):\t" << clipped_[i] << endl;
+      os << i+1 << "grams:\t\t\t" << sum_[i] << endl;
     }
   }
 
-  inline void
-  Print()
-  {
-    for (unsigned i = 0; i < N_; i++) {
-      cout << i+1 << "grams (clipped):\t" << clipped_[i] << endl;
-      cout << i+1 << "grams:\t\t\t" << sum_[i] << endl;
-    }
-  }
-
-  inline void Resize(unsigned N)
+  inline void Resize(size_t N)
   {
     if (N == N_) return;
     else if (N > N_) {
-      for (unsigned i = N_; i < N; i++) {
+      for (size_t i = N_; i < N; i++) {
        clipped_[i] = 0.;
        sum_[i] = 0.;
       }
     } else { // N < N_
-      for (unsigned i = N_-1; i > N-1; i--) {
+      for (size_t i = N_-1; i > N-1; i--) {
        clipped_.erase(i);
        sum_.erase(i);
       }
@@ -99,16 +90,16 @@ struct NgramCounts
   }
 };
 
-typedef map<vector<WordID>, unsigned> Ngrams;
+typedef map<vector<WordID>, size_t> Ngrams;
 
 inline Ngrams
-MakeNgrams(const vector<WordID>& s, const unsigned N)
+MakeNgrams(const vector<WordID>& s, const size_t N)
 {
   Ngrams ngrams;
   vector<WordID> ng;
   for (size_t i = 0; i < s.size(); i++) {
     ng.clear();
-    for (unsigned j = i; j < min(i+N, s.size()); j++) {
+    for (size_t j = i; j < min(i+N, s.size()); j++) {
       ng.push_back(s[j]);
       ngrams[ng]++;
     }
@@ -118,24 +109,21 @@ MakeNgrams(const vector<WordID>& s, const unsigned N)
 }
 
 inline NgramCounts
-MakeNgramCounts(const vector<WordID>& hyp, const vector<vector<WordID> >& refs, const unsigned N)
+MakeNgramCounts(const vector<WordID>& hyp,
+                const vector<Ngrams>& ref,
+                const size_t N)
 {
   Ngrams hyp_ngrams = MakeNgrams(hyp, N);
-  vector<Ngrams> refs_ngrams;
-  for (auto r: refs) {
-    Ngrams r_ng = MakeNgrams(r, N);
-    refs_ngrams.push_back(r_ng);
-  }
   NgramCounts counts(N);
   Ngrams::iterator it, ti;
   for (it = hyp_ngrams.begin(); it != hyp_ngrams.end(); it++) {
-    unsigned max_ref_count = 0;
-    for (auto ref_ngrams: refs_ngrams) {
-      ti = ref_ngrams.find(it->first);
-      if (ti != ref_ngrams.end())
+    size_t max_ref_count = 0;
+    for (auto r: ref) {
+      ti = r.find(it->first);
+      if (ti != r.end())
        max_ref_count = max(max_ref_count, ti->second);
     }
-    counts.Add(it->second, min(it->second, max_ref_count), it->first.size() - 1);
+    counts.Add(it->second, min(it->second, max_ref_count), it->first.size()-1);
   }
 
   return counts;
@@ -150,56 +138,65 @@ MakeNgramCounts(const vector<WordID>& hyp, const vector<vector<WordID> >& refs,
  * [simply add 1 to reference length for calculation of BP]
  *
  */
-
 struct PerSentenceBleuScorer
 {
-  const unsigned N_;
-  vector<score_t> w_;
+  const size_t     N_;
+  vector<weight_t> w_;
 
-  PerSentenceBleuScorer(unsigned n) : N_(n)
+  PerSentenceBleuScorer(size_t n) : N_(n)
  {
-    for (auto i = 1; i <= N_; i++)
+    for (size_t i = 1; i <= N_; i++)
       w_.push_back(1.0/N_);
   }
 
-  inline score_t
-  BrevityPenalty(const unsigned hyp_len, const unsigned ref_len)
+  inline weight_t
+  BrevityPenalty(const size_t hl, const size_t rl)
   {
-    if (hyp_len > ref_len) return 1;
-    return exp(1 - (score_t)ref_len/hyp_len);
+    if (hl > rl)
+      return 1;
+
+    return exp(1 - (weight_t)rl/hl);
   }
 
-  score_t
-  Score(const vector<WordID>& hyp, const vector<vector<WordID> >& refs)
+  weight_t
+  Score(const vector<WordID>& hyp,
+        const vector<Ngrams>& ref_ngs,
+        const vector<size_t>& ref_ls)
   {
-    unsigned hyp_len = hyp.size(), ref_len = 0;
+    size_t hl = hyp.size(), rl = 0;
+    if (hl == 0) return 0.;
     // best match reference length
-    if (refs.size() == 1)  {
-      ref_len = refs[0].size();
+    if (ref_ls.size() == 1)  {
+      rl = ref_ls.front();
     } else {
-      unsigned i = 0, best_idx = 0;
-      unsigned best = std::numeric_limits<unsigned>::max();
-      for (auto r: refs) {
-        unsigned d = abs(hyp_len-r.size());
-        if (best > d) best_idx = i;
+      size_t i = 0, best_idx = 0;
+      size_t best = numeric_limits<size_t>::max();
+      for (auto l: ref_ls) {
+        size_t d = abs(hl-l);
+        if (d < best) {
+          best_idx = i;
+          best = d;
+        }
+        i += 1;
       }
-      ref_len = refs[best_idx].size();
+      rl = ref_ls[best_idx];
     }
-    if (hyp_len == 0 || ref_len == 0) return 0.;
-    NgramCounts counts = MakeNgramCounts(hyp, refs, N_);
-    unsigned M = N_;
-    vector<score_t> v = w_;
-    if (ref_len < N_) {
-      M = ref_len;
-      for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
+    if (rl == 0) return 0.;
+    NgramCounts counts = MakeNgramCounts(hyp, ref_ngs, N_);
+    size_t M = N_;
+    vector<weight_t> v = w_;
+    if (rl < N_) {
+      M = rl;
+      for (size_t i = 0; i < M; i++) v[i] = 1/((weight_t)M);
     }
-    score_t sum = 0, add = 0;
-    for (unsigned i = 0; i < M; i++) {
+    weight_t sum = 0, add = 0;
+    for (size_t i = 0; i < M; i++) {
       if (i == 0 && (counts.sum_[i] == 0 || counts.clipped_[i] == 0)) return 0.;
       if (i == 1) add = 1;
-      sum += v[i] * log(((score_t)counts.clipped_[i] + add)/((counts.sum_[i] + add)));
+      sum += v[i] * log(((weight_t)counts.clipped_[i] + add)/((counts.sum_[i] + add)));
     }
-    return  BrevityPenalty(hyp_len, ref_len+1) * exp(sum);
+
+    return BrevityPenalty(hl, rl+1) * exp(sum);
   }
 };
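[Note: restated as a formula — the rewrite also fixes the best-match reference length search, whose old loop neither updated best nor incremented i — the scorer computes

    \mathrm{BLEU}(h) = \mathrm{BP}(|h|,\, r{+}1) \cdot
      \exp\Big(\sum_{n=1}^{M} w_n \log \tilde{p}_n\Big),
    \qquad
    \mathrm{BP}(c, r) = \begin{cases} 1 & c > r \\ e^{\,1 - r/c} & c \le r \end{cases}

where r is the reference length closest to |h|, M = \min(N, r), w_n = 1/M, \tilde{p}_1 = \mathrm{clipped}_1 / \mathrm{sum}_1 (zero 1-gram matches short-circuit the score to 0), and \tilde{p}_n = (\mathrm{clipped}_n + 1)/(\mathrm{sum}_n + 1) for n \ge 2. The r+1 inside the brevity penalty is the per-sentence correction flagged in the comment block above.]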
diff --git a/training/dtrain/update.h b/training/dtrain/update.h
new file mode 100644
index 00000000..57671ce1
--- /dev/null
+++ b/training/dtrain/update.h
@@ -0,0 +1,65 @@
+#ifndef _DTRAIN_UPDATE_H_
+#define _DTRAIN_UPDATE_H_
+
+namespace dtrain
+{
+
+bool
+CmpHypsByGold(ScoredHyp a, ScoredHyp b)
+{
+  return a.gold > b.gold;
+}
+
+/*
+ * multipartite ranking
+ *  sort (descending) by bleu
+ *  compare top X (hi) to middle Y (med) and low X (lo)
+ *  cmp middle Y to low X
+ */
+inline size_t
+CollectUpdates(vector<ScoredHyp>* s,
+               SparseVector<weight_t>& updates,
+               float margin=1.0)
+{
+  size_t num_pairs = 0;
+  size_t sz = s->size();
+  if (sz < 2) return 0;
+  sort(s->begin(), s->end(), CmpHypsByGold);
+  size_t sep = round(sz*0.1);
+  size_t sep_hi = sep;
+  if (sz > 4) {
+    while
+      (sep_hi < sz && (*s)[sep_hi-1].gold == (*s)[sep_hi].gold) ++sep_hi;
+  }
+  else sep_hi = 1;
+  for (size_t i = 0; i < sep_hi; i++) {
+    for (size_t j = sep_hi; j < sz; j++) {
+      if (((*s)[i].model-(*s)[j].model) > margin)
+        continue;
+      if ((*s)[i].gold != (*s)[j].gold) {
+        updates += (*s)[i].f-(*s)[j].f;
+        num_pairs++;
+      }
+    }
+  }
+  size_t sep_lo = sz-sep;
+  while (sep_lo > 0 && (*s)[sep_lo-1].gold == (*s)[sep_lo].gold)
+    --sep_lo;
+  for (size_t i = sep_hi; i < sep_lo; i++) {
+    for (size_t j = sep_lo; j < sz; j++) {
+      if (((*s)[i].model-(*s)[j].model) > margin)
+        continue;
+      if ((*s)[i].gold != (*s)[j].gold) {
+        updates += (*s)[i].f-(*s)[j].f;
+        num_pairs++;
+      }
+    }
+  }
+
+  return num_pairs;
+}
+
+} // namespace
+
+#endif
+
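[Note: CollectUpdates realizes the multipartite scheme from the comment. After sorting by gold score, the list splits into a top, middle, and bottom slice (nominally 10% at each end, with the boundaries shifted so gold-score ties never straddle a slice); pairs are drawn top-vs-rest and middle-vs-bottom, and a pair contributes its feature difference to updates only when the gold scores differ and the model fails to separate the pair by more than margin. One apparent edge case: for sz of 2 to 4, sep is 0, so sep_lo starts at sz and the tie loop's first comparison reads (*s)[sz], one element past the end. Below, a self-contained sketch of the selection logic on a stub type, with that bounds check added:

    // Stand-alone illustration of the pair selection above, on a stub type
    // with just the fields CollectUpdates reads. The 'sep_lo < sz' test is
    // an added bounds check; everything else mirrors the diff (the diff
    // skips the sep_hi tie extension when sz <= 4; here sz == 6).
    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Hyp { double gold, model; };  // stand-in for dtrain's ScoredHyp

    int main()
    {
      std::vector<Hyp> s = {{0.1, -2.0}, {0.4, -1.0}, {0.3, -1.5},
                            {0.2, -3.0}, {0.4, -0.5}, {0.0, -4.0}};
      // sort (descending) by gold, i.e. by per-sentence BLEU
      std::sort(s.begin(), s.end(),
                [](const Hyp& a, const Hyp& b) { return a.gold > b.gold; });

      const std::size_t sz = s.size();
      std::size_t sep = std::round(sz*0.1);  // 10% slice -> 1 for sz == 6
      std::size_t sep_hi = sz > 4 ? sep : 1;
      // push boundaries so gold-score ties never straddle a slice
      while (sep_hi < sz && s[sep_hi-1].gold == s[sep_hi].gold) ++sep_hi;
      std::size_t sep_lo = sz-sep;
      while (sep_lo > 0 && sep_lo < sz  // 'sep_lo < sz' added vs. the diff
             && s[sep_lo-1].gold == s[sep_lo].gold) --sep_lo;

      // a pair counts when gold differs and the model fails to separate
      // the two hypotheses by more than the margin (here 1.0)
      auto pairs = [&](std::size_t from, std::size_t mid) {
        std::size_t n = 0;
        for (std::size_t i = from; i < mid; i++)
          for (std::size_t j = mid; j < sz; j++)
            if (s[i].model-s[j].model <= 1.0 && s[i].gold != s[j].gold) n++;
        return n;
      };
      // prints "hi vs rest: 3, med vs lo: 1"
      std::printf("hi vs rest: %zu, med vs lo: %zu\n",
                  pairs(0, sep_hi), pairs(sep_hi, sep_lo));
      return 0;
    }

With these six hypotheses the selection yields three hi-vs-rest pairs and one med-vs-lo pair; in dtrain each selected pair would add f(better) - f(worse) to the SGD update.]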
