Diffstat (limited to 'dtrain/test')
| -rw-r--r-- | dtrain/test/example/cdec.ini               |  24 |
| -rw-r--r-- | dtrain/test/example/dtrain.ini             |  20 |
| -rw-r--r-- | dtrain/test/example/nc-wmt11.1k.gz         | bin | 0 -> 21185883 bytes |
| -rw-r--r-- | dtrain/test/example/nc-wmt11.en.srilm.gz   | bin | 0 -> 16017291 bytes |
| -rw-r--r-- | dtrain/test/mtm11/logreg_cd/bin_class.cc   |   4 |
| -rw-r--r-- | dtrain/test/mtm11/logreg_cd/bin_class.h    |  22 |
| -rw-r--r-- | dtrain/test/mtm11/logreg_cd/log_reg.cc     |  39 |
| -rw-r--r-- | dtrain/test/mtm11/logreg_cd/log_reg.h      |  14 |
| -rw-r--r-- | dtrain/test/mtm11/mira_update/Hildreth.cpp | 187 |
| -rw-r--r-- | dtrain/test/mtm11/mira_update/Hildreth.h   |  10 |
| -rw-r--r-- | dtrain/test/mtm11/mira_update/dtrain.cc    | 532 |
| -rw-r--r-- | dtrain/test/mtm11/mira_update/sample.h     | 101 |
| -rw-r--r-- | dtrain/test/toy/cdec.ini                   |   2 |
| -rw-r--r-- | dtrain/test/toy/dtrain.ini                 |  12 |
| -rw-r--r-- | dtrain/test/toy/input                      |   2 |
15 files changed, 969 insertions, 0 deletions
diff --git a/dtrain/test/example/cdec.ini b/dtrain/test/example/cdec.ini
new file mode 100644
index 00000000..fe5ca759
--- /dev/null
+++ b/dtrain/test/example/cdec.ini
@@ -0,0 +1,24 @@
+formalism=scfg
+add_pass_through_rules=true
+scfg_max_span_limit=15
+intersection_strategy=cube_pruning
+cubepruning_pop_limit=30
+feature_function=WordPenalty
+feature_function=KLanguageModel test/example/nc-wmt11.en.srilm.gz
+# all currently working feature functions for translation:
+#feature_function=ArityPenalty
+#feature_function=CMR2008ReorderingFeatures
+#feature_function=Dwarf
+#feature_function=InputIndicator
+#feature_function=LexNullJump
+#feature_function=NewJump
+#feature_function=NgramFeatures
+#feature_function=NonLatinCount
+#feature_function=OutputIndicator
+feature_function=RuleIdentityFeatures
+feature_function=RuleNgramFeatures
+feature_function=RuleShape
+#feature_function=SourceSpanSizeFeatures
+#feature_function=SourceWordPenalty
+#feature_function=SpanFeatures
+# ^^^ the features active above were used in the ACL paper
diff --git a/dtrain/test/example/dtrain.ini b/dtrain/test/example/dtrain.ini
new file mode 100644
index 00000000..68173e11
--- /dev/null
+++ b/dtrain/test/example/dtrain.ini
@@ -0,0 +1,20 @@
+input=test/example/nc-wmt11.1k.gz    # use '-' for stdin
+output=-                             # a weights file or stdout
+decoder_config=test/example/cdec.ini # ini for cdec
+# these will be printed on each iteration
+print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
+tmp=/tmp
+stop_after=10 # stop each epoch after 10 inputs
+
+# interesting stuff
+epochs=3                # run over input 3 times
+k=200                   # use 200-best lists
+N=4                     # optimize (approx.) BLEU4
+learning_rate=0.0001    # learning rate
+gamma=0.00001           # SVM-style regularization
+scorer=stupid_bleu      # use stupid BLEU+1 approx.
+sample_from=kbest       # use k-best lists (as opposed to the forest)
+filter=uniq             # keep only unique entries in the k-best list
+pair_sampling=108010    # 10 vs 80 vs 10 and 80 vs 10
+pair_threshold=0        # minimum distance in BLEU
+select_weights=last     # just output the last weights
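The pair_sampling=108010 setting above is glossed only by its comment ("10 vs 80 vs 10 and 80 vs 10"). A plausible reading, sketched below purely as an illustration and not taken from the dtrain sources, is that the k-best list is split by rank into a top 10%, a middle 80% and a bottom 10%, and training pairs are built across those strata rather than from all O(k^2) combinations:

    // Hypothetical sketch of a '108010' pair-sampling scheme: pair ranks
    // across the top-10% / middle-80% / bottom-10% strata of a k-best list.
    #include <cstddef>
    #include <utility>
    #include <vector>

    std::vector<std::pair<std::size_t, std::size_t> > sample_108010(std::size_t k)
    {
      std::vector<std::pair<std::size_t, std::size_t> > pairs;
      const std::size_t top = k / 10;        // first rank below the top 10%
      const std::size_t bottom = k - k / 10; // first rank of the bottom 10%
      for (std::size_t i = 0; i < k; i++) {
        for (std::size_t j = i + 1; j < k; j++) {
          const bool crosses = (i < top && j >= top)        // top vs. rest
                            || (i < bottom && j >= bottom); // rest vs. bottom
          if (crosses) pairs.push_back(std::make_pair(i, j));
        }
      }
      return pairs;
    }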
diff --git a/dtrain/test/example/nc-wmt11.1k.gz b/dtrain/test/example/nc-wmt11.1k.gz
new file mode 100644
index 00000000..45496cd8
--- /dev/null
+++ b/dtrain/test/example/nc-wmt11.1k.gz
Binary files differ
diff --git a/dtrain/test/example/nc-wmt11.en.srilm.gz b/dtrain/test/example/nc-wmt11.en.srilm.gz
new file mode 100644
index 00000000..7ce81057
--- /dev/null
+++ b/dtrain/test/example/nc-wmt11.en.srilm.gz
Binary files differ
diff --git a/dtrain/test/mtm11/logreg_cd/bin_class.cc b/dtrain/test/mtm11/logreg_cd/bin_class.cc
new file mode 100644
index 00000000..19bcde25
--- /dev/null
+++ b/dtrain/test/mtm11/logreg_cd/bin_class.cc
@@ -0,0 +1,4 @@
+#include "bin_class.h"
+
+Objective::~Objective() {}
+
diff --git a/dtrain/test/mtm11/logreg_cd/bin_class.h b/dtrain/test/mtm11/logreg_cd/bin_class.h
new file mode 100644
index 00000000..3466109a
--- /dev/null
+++ b/dtrain/test/mtm11/logreg_cd/bin_class.h
@@ -0,0 +1,22 @@
+#ifndef _BIN_CLASS_H_
+#define _BIN_CLASS_H_
+
+#include <vector>
+#include "sparse_vector.h"
+
+struct TrainingInstance {
+  // TODO add other info? loss for MIRA-type updates?
+  SparseVector<double> x_feature_map;
+  bool y;
+};
+
+struct Objective {
+  virtual ~Objective();
+
+  // returns f(x) and f'(x)
+  virtual double ObjectiveAndGradient(const SparseVector<double>& x,
+                  const std::vector<TrainingInstance>& training_instances,
+                  SparseVector<double>* g) const = 0;
+};
+
+#endif
diff --git a/dtrain/test/mtm11/logreg_cd/log_reg.cc b/dtrain/test/mtm11/logreg_cd/log_reg.cc
new file mode 100644
index 00000000..ec2331fe
--- /dev/null
+++ b/dtrain/test/mtm11/logreg_cd/log_reg.cc
@@ -0,0 +1,39 @@
+#include "log_reg.h"
+
+#include <vector>
+#include <cmath>
+
+#include "sparse_vector.h"
+
+using namespace std;
+
+double LogisticRegression::ObjectiveAndGradient(const SparseVector<double>& x,
+                              const vector<TrainingInstance>& training_instances,
+                              SparseVector<double>* g) const {
+  double cll = 0;
+  for (int i = 0; i < training_instances.size(); ++i) {
+    const double dotprod = training_instances[i].x_feature_map.dot(x); // TODO no bias, if bias, add x[0]
+    double lp_false = dotprod;
+    double lp_true = -dotprod;
+    if (0 < lp_true) {
+      lp_true += log1p(exp(-lp_true));
+      lp_false = log1p(exp(lp_false));
+    } else {
+      lp_true = log1p(exp(lp_true));
+      lp_false += log1p(exp(-lp_false));
+    }
+    lp_true *= -1;
+    lp_false *= -1;
+    if (training_instances[i].y) {  // true label
+      cll -= lp_true;
+      (*g) -= training_instances[i].x_feature_map * exp(lp_false);
+      // (*g)[0] -= exp(lp_false); // bias
+    } else {                  // false label
+      cll -= lp_false;
+      (*g) += training_instances[i].x_feature_map * exp(lp_true);
+      // g += corpus[i].second * exp(lp_true);
+    }
+  }
+  return cll;
+}
+
diff --git a/dtrain/test/mtm11/logreg_cd/log_reg.h b/dtrain/test/mtm11/logreg_cd/log_reg.h
new file mode 100644
index 00000000..ecc560b8
--- /dev/null
+++ b/dtrain/test/mtm11/logreg_cd/log_reg.h
@@ -0,0 +1,14 @@
+#ifndef _LOG_REG_H_
+#define _LOG_REG_H_
+
+#include <vector>
+#include "sparse_vector.h"
+#include "bin_class.h"
+
+struct LogisticRegression : public Objective {
+  double ObjectiveAndGradient(const SparseVector<double>& x,
+                              const std::vector<TrainingInstance>& training_instances,
+                              SparseVector<double>* g) const;
+};
+
+#endif
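The log1p/exp branches in log_reg.cc are a numerically stable evaluation of the log-sigmoid: after the final sign flips, lp_true equals log sigmoid(w·x) and lp_false equals log sigmoid(-w·x), so the gradient terms exp(lp_false) and exp(lp_true) are exactly the probabilities of the wrong label. The underlying identity, as a minimal standalone sketch:

    // Stable log-sigmoid: log(1/(1+exp(-z))) without overflowing exp().
    #include <cmath>

    double log_sigmoid(double z)
    {
      if (z >= 0.0)
        return -log1p(exp(-z));   // exp argument is <= 0, cannot overflow
      else
        return z - log1p(exp(z)); // factor out exp(z) so the argument is <= 0
    }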
diff --git a/dtrain/test/mtm11/mira_update/Hildreth.cpp b/dtrain/test/mtm11/mira_update/Hildreth.cpp
new file mode 100644
index 00000000..0e67eb15
--- /dev/null
+++ b/dtrain/test/mtm11/mira_update/Hildreth.cpp
@@ -0,0 +1,187 @@
+#include "Hildreth.h"
+#include "sparse_vector.h"
+
+using namespace std;
+
+namespace Mira {
+  vector<double> Hildreth::optimise (vector< SparseVector<double> >& a, vector<double>& b) {
+
+    size_t i;
+    int max_iter = 10000;
+    double eps = 0.00000001;
+    double zero = 0.000000000001;
+
+    vector<double> alpha ( b.size() );
+    vector<double> F ( b.size() );
+    vector<double> kkt ( b.size() );
+
+    double max_kkt = -1e100;
+
+    size_t K = b.size();
+
+    double A[K][K];
+    bool is_computed[K];
+    for ( i = 0; i < K; i++ )
+    {
+      A[i][i] = a[i].dot(a[i]);
+      is_computed[i] = false;
+    }
+
+    int max_kkt_i = -1;
+
+
+    for ( i = 0; i < b.size(); i++ )
+    {
+      F[i] = b[i];
+      kkt[i] = F[i];
+      if ( kkt[i] > max_kkt )
+      {
+        max_kkt = kkt[i];
+        max_kkt_i = i;
+      }
+    }
+
+    int iter = 0;
+    double diff_alpha;
+    double try_alpha;
+    double add_alpha;
+
+    while ( max_kkt >= eps && iter < max_iter )
+    {
+
+      diff_alpha = A[max_kkt_i][max_kkt_i] <= zero ? 0.0 : F[max_kkt_i]/A[max_kkt_i][max_kkt_i];
+      try_alpha = alpha[max_kkt_i] + diff_alpha;
+      add_alpha = 0.0;
+
+      if ( try_alpha < 0.0 )
+        add_alpha = -1.0 * alpha[max_kkt_i];
+      else
+        add_alpha = diff_alpha;
+
+      alpha[max_kkt_i] = alpha[max_kkt_i] + add_alpha;
+
+      if ( !is_computed[max_kkt_i] )
+      {
+        for ( i = 0; i < K; i++ )
+        {
+          A[i][max_kkt_i] = a[i].dot(a[max_kkt_i] ); // for version 1
+          //A[i][max_kkt_i] = 0; // for version 1
+          is_computed[max_kkt_i] = true;
+        }
+      }
+
+      for ( i = 0; i < F.size(); i++ )
+      {
+        F[i] -= add_alpha * A[i][max_kkt_i];
+        kkt[i] = F[i];
+        if ( alpha[i] > zero )
+          kkt[i] = abs ( F[i] );
+      }
+      max_kkt = -1e100;
+      max_kkt_i = -1;
+      for ( i = 0; i < F.size(); i++ )
+        if ( kkt[i] > max_kkt )
+        {
+          max_kkt = kkt[i];
+          max_kkt_i = i;
+        }
+
+      iter++;
+    }
+
+    return alpha;
+  }
+
+  vector<double> Hildreth::optimise (vector< SparseVector<double> >& a, vector<double>& b, double C) {
+
+    size_t i;
+    int max_iter = 10000;
+    double eps = 0.00000001;
+    double zero = 0.000000000001;
+
+    vector<double> alpha ( b.size() );
+    vector<double> F ( b.size() );
+    vector<double> kkt ( b.size() );
+
+    double max_kkt = -1e100;
+
+    size_t K = b.size();
+
+    double A[K][K];
+    bool is_computed[K];
+    for ( i = 0; i < K; i++ )
+    {
+      A[i][i] = a[i].dot(a[i]);
+      is_computed[i] = false;
+    }
+
+    int max_kkt_i = -1;
+
+
+    for ( i = 0; i < b.size(); i++ )
+    {
+      F[i] = b[i];
+      kkt[i] = F[i];
+      if ( kkt[i] > max_kkt )
+      {
+        max_kkt = kkt[i];
+        max_kkt_i = i;
+      }
+    }
+
+    int iter = 0;
+    double diff_alpha;
+    double try_alpha;
+    double add_alpha;
+
+    while ( max_kkt >= eps && iter < max_iter )
+    {
+
+      diff_alpha = A[max_kkt_i][max_kkt_i] <= zero ? 0.0 : F[max_kkt_i]/A[max_kkt_i][max_kkt_i];
+      try_alpha = alpha[max_kkt_i] + diff_alpha;
+      add_alpha = 0.0;
+
+      if ( try_alpha < 0.0 )
+        add_alpha = -1.0 * alpha[max_kkt_i];
+      else if (try_alpha > C)
+        add_alpha = C - alpha[max_kkt_i];
+      else
+        add_alpha = diff_alpha;
+
+      alpha[max_kkt_i] = alpha[max_kkt_i] + add_alpha;
+
+      if ( !is_computed[max_kkt_i] )
+      {
+        for ( i = 0; i < K; i++ )
+        {
+          A[i][max_kkt_i] = a[i].dot(a[max_kkt_i] ); // for version 1
+          //A[i][max_kkt_i] = 0; // for version 1
+          is_computed[max_kkt_i] = true;
+        }
+      }
+
+      for ( i = 0; i < F.size(); i++ )
+      {
+        F[i] -= add_alpha * A[i][max_kkt_i];
+        kkt[i] = F[i];
+        if (alpha[i] > C - zero)
+          kkt[i] = -kkt[i];
+        else if (alpha[i] > zero)
+          kkt[i] = abs(F[i]);
+
+      }
+      max_kkt = -1e100;
+      max_kkt_i = -1;
+      for ( i = 0; i < F.size(); i++ )
+        if ( kkt[i] > max_kkt )
+        {
+          max_kkt = kkt[i];
+          max_kkt_i = i;
+        }
+
+      iter++;
+    }
+
+    return alpha;
+  }
+}
diff --git a/dtrain/test/mtm11/mira_update/Hildreth.h b/dtrain/test/mtm11/mira_update/Hildreth.h
new file mode 100644
index 00000000..8d791085
--- /dev/null
+++ b/dtrain/test/mtm11/mira_update/Hildreth.h
@@ -0,0 +1,10 @@
+#include "sparse_vector.h"
+
+namespace Mira {
+  class Hildreth {
+  public :
+    static std::vector<double> optimise(std::vector< SparseVector<double> >& a, std::vector<double>& b);
+    static std::vector<double> optimise(std::vector< SparseVector<double> >& a, std::vector<double>& b, double C);
+  };
+}
+
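Hildreth.cpp solves the dual of a small quadratic program by greedy coordinate ascent: each iteration picks the constraint with the largest KKT violation, takes the step F[i]/A[i][i] on its dual variable, clips it so alpha stays in [0, C] (second overload), and updates the residuals F incrementally; rows of the Gram matrix A are filled in lazily on first use. A usage sketch mirroring how dtrain.cc below calls it (the wrapper function is hypothetical; SparseVector comes from cdec's sparse_vector.h):

    #include <vector>
    #include "sparse_vector.h"
    #include "Hildreth.h"

    // diffs[k] = f(hope_k) - f(fear_k); losses[k] = loss difference minus
    // model score difference for constraint k; C caps each dual variable.
    void apply_mira_update(std::vector< SparseVector<double> >& diffs,
                           std::vector<double>& losses,
                           double C,
                           SparseVector<double>* lambdas)
    {
      std::vector<double> alphas = Mira::Hildreth::optimise(diffs, losses, C);
      for (size_t k = 0; k < diffs.size(); ++k)
        (*lambdas) += diffs[k] * alphas[k]; // weighted sum of constraint vectors
    }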
sentences" ) +    ( "weights_file",   po::value<string>(),      "input weights file (e.g. from previous iteration)" ) +    ( "wprint",         po::value<string>(),                     "weights to print on each iteration" ) +    ( "noup",           po::value<bool>()->zero_tokens(),                     "do not update weights" ); + +  po::options_description clo("Command Line Options"); +  clo.add_options() +    ( "config,c",         po::value<string>(),              "dtrain config file" ) +    ( "quiet,q",          po::value<bool>()->zero_tokens(),           "be quiet" ) +    ( "update-type",      po::value<string>(&update_type)->default_value("mira"), "perceptron or mira" ) +    ( "n-pairs",          po::value<size_t>(&n_pairs)->default_value(10), "number of pairs used to compute update" ) +    ( "verbose,v",        po::value<bool>()->zero_tokens(),         "be verbose" ) +#ifndef DTRAIN_DEBUG +    ; +#else +    ( "test", "run tests and exit"); +#endif +  po::options_description config_options, cmdline_options; + +  config_options.add(conff); +  cmdline_options.add(clo); +  cmdline_options.add(conff); + +  po::store( parse_command_line(argc, argv, cmdline_options), *cfg ); +  if ( cfg->count("config") ) { +    ifstream config( (*cfg)["config"].as<string>().c_str() ); +    po::store( po::parse_config_file(config, config_options), *cfg ); +  } +  po::notify(*cfg); + +  if ( !cfg->count("decoder_config") || !cfg->count("input") ) {  +    cerr << cmdline_options << endl; +    return false; +  } +  if ( cfg->count("noup") && cfg->count("decode") ) { +    cerr << "You can't use 'noup' and 'decode' at once." << endl; +    return false; +  } +  if ( cfg->count("filter") && (*cfg)["filter"].as<string>() != "unique" +       && (*cfg)["filter"].as<string>() != "no" ) { +    cerr << "Wrong 'filter' type: '" << (*cfg)["filter"].as<string>() << "'." << endl; +  } +  #ifdef DTRAIN_DEBUG        +  if ( !cfg->count("test") ) { +    cerr << cmdline_options << endl; +    return false; +  } +  #endif +  return true; +} + + +// output formatting +ostream& _nopos( ostream& out ) { return out << resetiosflags( ios::showpos ); } +ostream& _pos( ostream& out ) { return out << setiosflags( ios::showpos ); } +ostream& _prec2( ostream& out ) { return out << setprecision(2); } +ostream& _prec5( ostream& out ) { return out << setprecision(5); } + + + + +/* + * dtrain + * + */ +int +main( int argc, char** argv ) +{ +  cout << setprecision( 5 ); +  // handle most parameters +  po::variables_map cfg; +  if ( ! 
init(argc, argv, &cfg) ) exit(1); // something is wrong  +#ifdef DTRAIN_DEBUG +  if ( cfg.count("test") ) run_tests(); // run tests and exit  +#endif +  bool quiet = false; +  if ( cfg.count("quiet") ) quiet = true; +  bool verbose = false;   +  if ( cfg.count("verbose") ) verbose = true; +  bool noup = false; +  if ( cfg.count("noup") ) noup = true; +  const size_t k = cfg["kbest"].as<size_t>(); +  const size_t N = cfg["ngrams"].as<size_t>();  +  const size_t T = cfg["epochs"].as<size_t>(); +  const size_t stop_after = cfg["stop_after"].as<size_t>(); +  const string filter_type = cfg["filter"].as<string>(); +  const string update_type = cfg["update-type"].as<string>(); +  const size_t n_pairs = cfg["n-pairs"].as<size_t>(); +  const string output_file = cfg["output"].as<string>(); +  if ( !quiet ) { +    cout << endl << "dtrain" << endl << "Parameters:" << endl; +    cout << setw(25) << "k " << k << endl; +    cout << setw(25) << "N " << N << endl; +    cout << setw(25) << "T " << T << endl; +    if ( cfg.count("stop-after") ) +      cout << setw(25) << "stop_after " << stop_after << endl; +    if ( cfg.count("weights") ) +      cout << setw(25) << "weights " << cfg["weights"].as<string>() << endl; +    cout << setw(25) << "input " << "'" << cfg["input"].as<string>() << "'" << endl; +    cout << setw(25) << "filter " << "'" << filter_type << "'" << endl; +  } + +  vector<string> wprint; +  if ( cfg.count("wprint") ) { +    boost::split( wprint, cfg["wprint"].as<string>(), boost::is_any_of(" ") ); +  } + +  // setup decoder, observer +  register_feature_functions(); +  SetSilent(true); +  ReadFile ini_rf( cfg["decoder_config"].as<string>() ); +  if ( !quiet ) +    cout << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl; +  Decoder decoder( ini_rf.stream() ); +  //KBestGetter observer( k, filter_type ); +  MT19937 rng; +  KSampler observer( k, &rng ); + +  // scoring metric/scorer +  string scorer_str = cfg["scorer"].as<string>(); +  double (*scorer)( NgramCounts&, const size_t, const size_t, size_t, vector<float> ); +  if ( scorer_str == "bleu" ) { +    scorer = &bleu; +  } else if ( scorer_str == "stupid_bleu" ) { +    scorer = &stupid_bleu; +  } else if ( scorer_str == "smooth_bleu" ) { +    scorer = &smooth_bleu; +  } else if ( scorer_str == "approx_bleu" ) { +    scorer = &approx_bleu; +  } else { +    cerr << "Don't know scoring metric: '" << scorer_str << "', exiting." << endl; +    exit(1); +  } +  // for approx_bleu +  NgramCounts global_counts( N ); // counts for 1 best translations +  size_t global_hyp_len = 0;      // sum hypothesis lengths +  size_t global_ref_len = 0;      // sum reference lengths +  // this is all BLEU implmentations +  vector<float> bleu_weights; // we leave this empty -> 1/N; TODO?  
+  if ( !quiet ) cout << setw(26) << "scorer '" << scorer_str << "'" << endl << endl; + +  // init weights +  Weights weights; +  if ( cfg.count("weights") ) weights.InitFromFile( cfg["weights"].as<string>() ); +  SparseVector<double> lambdas; +  weights.InitSparseVector( &lambdas ); +  vector<double> dense_weights; + +  // input +  if ( !quiet && !verbose ) +    cout << "(a dot represents " << DTRAIN_DOTS << " lines of input)" << endl; +  string input_fn = cfg["input"].as<string>(); +  ifstream input; +  if ( input_fn != "-" ) input.open( input_fn.c_str() ); +  string in; +  vector<string> in_split; // input: src\tref\tpsg +  vector<string> ref_tok;  // tokenized reference +  vector<WordID> ref_ids;  // reference as vector of WordID +  string grammar_str; + +  // buffer input for t > 0 +  vector<string> src_str_buf;           // source strings, TODO? memory +  vector<vector<WordID> > ref_ids_buf;  // references as WordID vecs +  filtering_ostream grammar_buf;        // written to compressed file in /tmp +  // this is for writing the grammar buffer file +  grammar_buf.push( gzip_compressor() ); +  char grammar_buf_tmp_fn[] = DTRAIN_TMP_DIR"/dtrain-grammars-XXXXXX"; +  mkstemp( grammar_buf_tmp_fn ); +  grammar_buf.push( file_sink(grammar_buf_tmp_fn, ios::binary | ios::trunc) ); +   +  size_t sid = 0, in_sz = 99999999; // sentence id, input size +  double acc_1best_score = 0., acc_1best_model = 0.; +  vector<vector<double> > scores_per_iter; +  double max_score = 0.; +  size_t best_t = 0; +  bool next = false, stop = false; +  double score = 0.; +  size_t cand_len = 0; +  double overall_time = 0.; + +  // for the perceptron/SVM; TODO as params +  double eta = 0.0005; +  double gamma = 0.;//01; // -> SVM +  lambdas.add_value( FD::Convert("__bias"), 0 ); +   +  // for random sampling +  srand ( time(NULL) ); + + +  for ( size_t t = 0; t < T; t++ ) // T epochs +  { + +  time_t start, end;   +  time( &start ); + +  // actually, we need only need this if t > 0 FIXME +  ifstream grammar_file( grammar_buf_tmp_fn, ios_base::in | ios_base::binary ); +  filtering_istream grammar_buf_in; +  grammar_buf_in.push( gzip_decompressor() ); +  grammar_buf_in.push( grammar_file ); + +  // reset average scores +  acc_1best_score = acc_1best_model = 0.; +   +  // reset sentence counter +  sid = 0; +   +  if ( !quiet ) cout << "Iteration #" << t+1 << " of " << T << "." << endl; +   +  while( true ) +  { + +    // get input from stdin or file +    in.clear(); +    next = stop = false; // next iteration, premature stop +    if ( t == 0 ) {     +      if ( input_fn == "-" ) { +        if ( !getline(cin, in) ) next = true; +      } else { +        if ( !getline(input, in) ) next = true;  +      } +    } else { +      if ( sid == in_sz ) next = true; // stop if we reach the end of our input +    } +    // stop after X sentences (but still iterate for those) +    if ( stop_after > 0 && stop_after == sid && !next ) stop = true; +     +    // produce some pretty output +    if ( !quiet && !verbose ) { +        if ( sid == 0 ) cout << " "; +        if ( (sid+1) % (DTRAIN_DOTS) == 0 ) { +            cout << "."; +            cout.flush(); +        } +        if ( (sid+1) % (20*DTRAIN_DOTS) == 0) { +            cout << " " << sid+1 << endl; +            if ( !next && !stop ) cout << " "; +        } +        if ( stop ) { +          if ( sid % (20*DTRAIN_DOTS) != 0 ) cout << " " << sid << endl; +          cout << "Stopping after " << stop_after << " input sentences." 
<< endl; +        } else { +          if ( next ) { +            if ( sid % (20*DTRAIN_DOTS) != 0 ) { +              cout << " " << sid << endl; +            } +          } +        } +    } +     +    // next iteration +    if ( next || stop ) break; + +    // weights +    dense_weights.clear(); +    weights.InitFromVector( lambdas ); +    weights.InitVector( &dense_weights ); +    decoder.SetWeights( dense_weights ); + +    if ( t == 0 ) { +      // handling input +      in_split.clear(); +      boost::split( in_split, in, boost::is_any_of("\t") ); // in_split[0] is id +      // getting reference +      ref_tok.clear(); ref_ids.clear(); +      boost::split( ref_tok, in_split[2], boost::is_any_of(" ") ); +      register_and_convert( ref_tok, ref_ids ); +      ref_ids_buf.push_back( ref_ids ); +      // process and set grammar +      bool broken_grammar = true; +      for ( string::iterator ti = in_split[3].begin(); ti != in_split[3].end(); ti++ ) { +        if ( !isspace(*ti) ) { +          broken_grammar = false; +          break; +        } +      } +      if ( broken_grammar ) continue; +      grammar_str = boost::replace_all_copy( in_split[3], " __NEXT__RULE__ ", "\n" ) + "\n"; // FIXME copy, __ +      grammar_buf << grammar_str << DTRAIN_GRAMMAR_DELIM << endl; +      decoder.SetSentenceGrammarFromString( grammar_str ); +      // decode, kbest +      src_str_buf.push_back( in_split[1] ); +      decoder.Decode( in_split[1], &observer ); +    } else { +      // get buffered grammar +      grammar_str.clear(); +      int i = 1; +      while ( true ) { +        string g;   +        getline( grammar_buf_in, g ); +        if ( g == DTRAIN_GRAMMAR_DELIM ) break; +        grammar_str += g+"\n"; +        i += 1; +      } +      decoder.SetSentenceGrammarFromString( grammar_str ); +      // decode, kbest +      decoder.Decode( src_str_buf[sid], &observer ); +    } + +    // get kbest list +    KBestList* kb; +    //if ( ) { // TODO get from forest +      kb = observer.GetKBest(); +    //} + +    // scoring kbest +    if ( t > 0 ) ref_ids = ref_ids_buf[sid]; +    for ( size_t i = 0; i < kb->GetSize(); i++ ) { +      NgramCounts counts = make_ngram_counts( ref_ids, kb->sents[i], N ); +      // this is for approx bleu +      if ( scorer_str == "approx_bleu" ) { +        if ( i == 0 ) { // 'context of 1best translations' +          global_counts  += counts; +          global_hyp_len += kb->sents[i].size(); +          global_ref_len += ref_ids.size(); +          counts.reset(); +          cand_len = 0; +        } else { +            cand_len = kb->sents[i].size(); +        } +        NgramCounts counts_tmp = global_counts + counts; +        // TODO as param +        score = 0.9 * scorer( counts_tmp, +                              global_ref_len, +                              global_hyp_len + cand_len, N, bleu_weights ); +      } else { +        // other scorers +        cand_len = kb->sents[i].size(); +        score = scorer( counts, +                        ref_ids.size(), +                        kb->sents[i].size(), N, bleu_weights ); +      } + +      kb->scores.push_back( score ); + +      if ( i == 0 ) { +        acc_1best_score += score; +        acc_1best_model += kb->model_scores[i]; +      } + +      if ( verbose ) { +        if ( i == 0 ) cout << "'" << TD::GetString( ref_ids ) << "' [ref]" << endl; +        cout << _prec5 << _nopos << "[hyp " << i << "] " << "'" << TD::GetString( kb->sents[i] ) << "'"; +        cout << " [SCORE=" << score << ",model="<< kb->model_scores[i] << "]" << 
endl; +        cout << kb->feats[i] << endl; // this is maybe too verbose +      } +    } // Nbest loop + +    if ( verbose ) cout << endl; + + +    // UPDATE WEIGHTS +    if ( !noup ) { + +      TrainingInstances pairs; +      sample_all( kb, pairs, n_pairs ); +        +      vector< SparseVector<double> > featureValueDiffs; +      vector<double> lossMinusModelScoreDiffs; +      for ( TrainingInstances::iterator ti = pairs.begin(); +            ti != pairs.end(); ti++ ) { + +        SparseVector<double> dv; +        if ( ti->first_score - ti->second_score < 0 ) { +          dv = ti->second - ti->first; +          dv.add_value( FD::Convert("__bias"), -1 ); +         +	  featureValueDiffs.push_back(dv); +	  double lossMinusModelScoreDiff = ti->loss_diff - ti->model_score_diff; +	  lossMinusModelScoreDiffs.push_back(lossMinusModelScoreDiff); + +	  if (update_type == "perceptron") { +	    lambdas += dv * eta; +	    cerr << "after perceptron update: " << lambdas << endl << endl; +	  } + +          if ( verbose ) { +            cout << "{{ f("<< ti->first_rank <<") > f(" << ti->second_rank << ") but g(i)="<< ti->first_score <<" < g(j)="<< ti->second_score << " so update" << endl; +            cout << " i  " << TD::GetString(kb->sents[ti->first_rank]) << endl; +            cout << "    " << kb->feats[ti->first_rank] << endl; +            cout << " j  " << TD::GetString(kb->sents[ti->second_rank]) << endl; +            cout << "    " << kb->feats[ti->second_rank] << endl;  +            cout << " diff vec: " << dv << endl; +            cout << " lambdas after update: " << lambdas << endl; +            cout << "}}" << endl; +          } +        } else { +          //SparseVector<double> reg; +          //reg = lambdas * ( 2 * gamma ); +          //lambdas += reg * ( -eta ); +        } +      } +      cerr << "Collected " << featureValueDiffs.size() << " constraints." 
<< endl; + +      double slack = 0.01; +      if (update_type == "mira") { +	if (featureValueDiffs.size() > 0) { +	  vector<double> alphas; +	  if (slack != 0) { +	    alphas = Mira::Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiffs, slack); +	  } else { +	    alphas = Mira::Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiffs); +	  } +	   +	  for (size_t k = 0; k < featureValueDiffs.size(); ++k) { +	    lambdas += featureValueDiffs[k] * alphas[k]; +	  } +	  //	  cerr << "after mira update: " << lambdas << endl << endl; +	}       +      } +    } + +    ++sid; + +  } // input loop + +  if ( t == 0 ) in_sz = sid; // remember size (lines) of input + +  // print some stats +  double avg_1best_score = acc_1best_score/(double)in_sz; +  double avg_1best_model = acc_1best_model/(double)in_sz; +  double avg_1best_score_diff, avg_1best_model_diff; +  if ( t > 0 ) { +    avg_1best_score_diff = avg_1best_score - scores_per_iter[t-1][0]; +    avg_1best_model_diff = avg_1best_model - scores_per_iter[t-1][1]; +  } else { +    avg_1best_score_diff = avg_1best_score; +    avg_1best_model_diff = avg_1best_model; +  } +  cout << _prec5 << _pos << "WEIGHTS" << endl; +  for (vector<string>::iterator it = wprint.begin(); it != wprint.end(); it++) { +    cout << setw(16) << *it << " = " << dense_weights[FD::Convert( *it )] << endl; +  } + +  cout << "        ---" << endl; +  cout << _nopos << "      avg score: " << avg_1best_score; +  cout << _pos << " (" << avg_1best_score_diff << ")" << endl; +  cout << _nopos << "avg model score: " << avg_1best_model; +  cout << _pos << " (" << avg_1best_model_diff << ")" << endl; +  vector<double> remember_scores; +  remember_scores.push_back( avg_1best_score ); +  remember_scores.push_back( avg_1best_model ); +  scores_per_iter.push_back( remember_scores ); +  if ( avg_1best_score > max_score ) { +    max_score = avg_1best_score; +    best_t = t; +  } + +  // close open files +  if ( input_fn != "-" ) input.close(); +  close( grammar_buf ); +  grammar_file.close(); + +  time ( &end ); +  double time_dif = difftime( end, start ); +  overall_time += time_dif; +  if ( !quiet ) { +    cout << _prec2 << _nopos << "(time " << time_dif/60. << " min, "; +    cout << time_dif/(double)in_sz<< " s/S)" << endl; +  } +   +  if ( t+1 != T ) cout << endl; + +  if ( noup ) break; + +  // write weights after every epoch                                                                                                                                                +  std::string s; +  std::stringstream out; +  out << t; +  s = out.str(); +  string weights_file = output_file + "." + s; +  weights.WriteToFile(weights_file, true ); + +  } // outer loop + +  unlink( grammar_buf_tmp_fn ); +  if ( !noup ) { +    if ( !quiet ) cout << endl << "writing weights file '" << cfg["output"].as<string>() << "' ..."; +    weights.WriteToFile( cfg["output"].as<string>(), true ); +    if ( !quiet ) cout << "done" << endl; +  } +   +  if ( !quiet ) { +    cout << _prec5 << _nopos << endl << "---" << endl << "Best iteration: "; +    cout << best_t+1 << " [SCORE '" << scorer_str << "'=" << max_score << "]." << endl; +    cout << _prec2 << "This took " << overall_time/60. << " min." 
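For reference, the update block above is the standard MIRA step (textbook formulation, not quoted from this commit): with feature difference vectors d_k = f(hope_k) - f(fear_k) and margin violations b_k = l_k - w^T d_k handed to Hildreth::optimise, the new weights solve

    \min_{w'} \tfrac{1}{2}\lVert w' - w \rVert^2
    \quad\text{s.t.}\quad (w')^\top d_k \ge \ell_k, \qquad k = 1,\dots,K,

whose solution has the form w' = w + \sum_k \alpha_k d_k with 0 \le \alpha_k \le C; the \alpha_k are exactly the alphas returned by the optimiser, and C is the slack value of 0.01.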
diff --git a/dtrain/test/mtm11/mira_update/sample.h b/dtrain/test/mtm11/mira_update/sample.h
new file mode 100644
index 00000000..5c331bba
--- /dev/null
+++ b/dtrain/test/mtm11/mira_update/sample.h
@@ -0,0 +1,101 @@
+#ifndef _DTRAIN_SAMPLE_H_
+#define _DTRAIN_SAMPLE_H_
+
+
+#include "kbestget.h"
+
+
+namespace dtrain
+{
+
+
+struct TPair
+{
+  SparseVector<double> first, second;
+  size_t first_rank, second_rank;
+  double first_score, second_score;
+  double model_score_diff;
+  double loss_diff;
+};
+
+typedef vector<TPair> TrainingInstances;
+
+
+void
+sample_all( KBestList* kb, TrainingInstances &training, size_t n_pairs )
+{
+  std::vector<double> loss_diffs;
+  TrainingInstances training_tmp;
+  for ( size_t i = 0; i < kb->GetSize()-1; i++ ) {
+    for ( size_t j = i+1; j < kb->GetSize(); j++ ) {
+      TPair p;
+      p.first = kb->feats[i];
+      p.second = kb->feats[j];
+      p.first_rank = i;
+      p.second_rank = j;
+      p.first_score = kb->scores[i];
+      p.second_score = kb->scores[j];
+
+      bool conservative = 1;
+      if ( kb->scores[i] - kb->scores[j] < 0 ) {
+        // j=hope, i=fear
+        p.model_score_diff = kb->model_scores[j] - kb->model_scores[i];
+        p.loss_diff = kb->scores[j] - kb->scores[i];
+        training_tmp.push_back(p);
+        loss_diffs.push_back(p.loss_diff);
+      }
+      else if (!conservative) {
+        // i=hope, j=fear
+        p.model_score_diff = kb->model_scores[i] - kb->model_scores[j];
+        p.loss_diff = kb->scores[i] - kb->scores[j];
+        training_tmp.push_back(p);
+        loss_diffs.push_back(p.loss_diff);
+      }
+    }
+  }
+
+  if (training_tmp.size() > 0) {
+    double threshold;
+    std::sort(loss_diffs.begin(), loss_diffs.end());
+    std::reverse(loss_diffs.begin(), loss_diffs.end());
+    threshold = loss_diffs.size() >= n_pairs ? loss_diffs[n_pairs-1] : loss_diffs[loss_diffs.size()-1];
+    cerr << "threshold: " << threshold << endl;
+    size_t constraints = 0;
+    for (size_t i = 0; (i < training_tmp.size() && constraints < n_pairs); ++i) {
+      if (training_tmp[i].loss_diff >= threshold) {
+        training.push_back(training_tmp[i]);
+        constraints++;
+      }
+    }
+  }
+  else {
+    cerr << "No pairs selected." << endl;
+  }
+}
+
+void
+sample_rand( KBestList* kb, TrainingInstances &training )
+{
+  srand( time(NULL) );
+  for ( size_t i = 0; i < kb->GetSize()-1; i++ ) {
+    for ( size_t j = i+1; j < kb->GetSize(); j++ ) {
+      if ( rand() % 2 ) {
+        TPair p;
+        p.first = kb->feats[i];
+        p.second = kb->feats[j];
+        p.first_rank = i;
+        p.second_rank = j;
+        p.first_score = kb->scores[i];
+        p.second_score = kb->scores[j];
+        training.push_back( p );
+      }
+    }
+  }
+}
+
+
+} // namespace
+
+
+#endif
+
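sample_all above keeps only pairs on which the model's ranking disagrees with the metric (scores[i] < scores[j] for ranks i < j, i.e. j is 'hope' and i is 'fear'; the conservative flag disables the mirrored case), then retains the n_pairs of them with the largest loss difference via a threshold. A tiny worked example with made-up numbers:

    // Hypothetical metric scores for a 3-best list, ranks 0..2: {0.3, 0.5, 0.4}.
    // Qualifying (fear, hope) pairs: (0,1) with loss_diff 0.2 and (0,2) with
    // loss_diff 0.1; (1,2) is skipped since rank 1 already outscores rank 2.
    // With n_pairs = 1 the threshold is 0.2, so only pair (0,1) is kept.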
diff --git a/dtrain/test/toy/cdec.ini b/dtrain/test/toy/cdec.ini
new file mode 100644
index 00000000..98b02d44
--- /dev/null
+++ b/dtrain/test/toy/cdec.ini
@@ -0,0 +1,2 @@
+formalism=scfg
+add_pass_through_rules=true
diff --git a/dtrain/test/toy/dtrain.ini b/dtrain/test/toy/dtrain.ini
new file mode 100644
index 00000000..abf22b94
--- /dev/null
+++ b/dtrain/test/toy/dtrain.ini
@@ -0,0 +1,12 @@
+decoder_config=test/toy/cdec.ini
+input=test/toy/input
+output=-
+print_weights=logp shell_rule house_rule small_rule little_rule PassThrough
+k=4
+N=4
+epochs=3
+scorer=stupid_bleu
+sample_from=kbest
+filter=uniq
+pair_sampling=all
+learning_rate=1
diff --git a/dtrain/test/toy/input b/dtrain/test/toy/input
new file mode 100644
index 00000000..4d10a9ea
--- /dev/null
+++ b/dtrain/test/toy/input
@@ -0,0 +1,2 @@
+0	ich sah ein kleines haus	i saw a little house	[S] ||| [NP,1] [VP,2] ||| [1] [2] ||| logp=0	[NP] ||| ich ||| i ||| logp=0	[NP] ||| ein [NN,1] ||| a [1] ||| logp=0	[NN] ||| [JJ,1] haus ||| [1] house ||| logp=0 house_rule=1	[NN] ||| [JJ,1] haus ||| [1] shell ||| logp=0 shell_rule=1	[JJ] ||| kleines ||| small ||| logp=0 small_rule=1	[JJ] ||| kleines ||| little ||| logp=0 little_rule=1	[JJ] ||| grosses ||| big ||| logp=0	[JJ] ||| grosses ||| large ||| logp=0	[VP] ||| [V,1] [NP,2] ||| [1] [2] ||| logp=0	[V] ||| sah ||| saw ||| logp=0	[V] ||| fand ||| found ||| logp=0
+1	ich fand ein kleines haus	i found a little house	[S] ||| [NP,1] [VP,2] ||| [1] [2] ||| logp=0	[NP] ||| ich ||| i ||| logp=0	[NP] ||| ein [NN,1] ||| a [1] ||| logp=0	[NN] ||| [JJ,1] haus ||| [1] house ||| logp=0 house_rule=1	[NN] ||| [JJ,1] haus ||| [1] shell ||| logp=0 shell_rule=1	[JJ] ||| kleines ||| small ||| logp=0 small_rule=1	[JJ] ||| kleines ||| little ||| logp=0 little_rule=1	[JJ] ||| grosses ||| big ||| logp=0	[JJ] ||| grosses ||| large ||| logp=0	[VP] ||| [V,1] [NP,2] ||| [1] [2] ||| logp=0	[V] ||| sah ||| saw ||| logp=0	[V] ||| fand ||| found ||| logp=0
