| author | Chris Dyer <redpony@gmail.com> | 2013-08-08 13:32:44 -0700 |
|---|---|---|
| committer | Chris Dyer <redpony@gmail.com> | 2013-08-08 13:32:44 -0700 |
| commit | 951e7daa9539ffe640f9421897c374f786af53e7 | |
| tree | 321898257090cc623fa7ea10d81b8e83126a5a0b /training/dtrain | |
| parent | f4a3a2547316ca5d31366e6808858fe94981415c | |
| parent | af2b10dd036aa0088cfef108c1c9713b7e2d9f8f | |
Merge pull request #24 from pks/master
current dtrain version
Diffstat (limited to 'training/dtrain')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | training/dtrain/README.md | 11 |
| -rw-r--r-- | training/dtrain/dtrain.cc | 76 |
| -rw-r--r-- | training/dtrain/dtrain.h | 74 |
| -rw-r--r-- | training/dtrain/examples/parallelized/cdec.ini | 2 |
| -rw-r--r-- | training/dtrain/examples/parallelized/dtrain.ini | 2 |
| -rw-r--r-- | training/dtrain/examples/parallelized/work/out.0.0 | 9 |
| -rw-r--r-- | training/dtrain/examples/parallelized/work/out.0.1 | 9 |
| -rw-r--r-- | training/dtrain/examples/parallelized/work/out.1.0 | 9 |
| -rw-r--r-- | training/dtrain/examples/parallelized/work/out.1.1 | 9 |
| -rw-r--r-- | training/dtrain/examples/standard/dtrain.ini | 24 |
| -rw-r--r-- | training/dtrain/examples/standard/expected-output | 86 |
| -rw-r--r-- | training/dtrain/kbestget.h | 66 |
| -rw-r--r-- | training/dtrain/ksampler.h | 5 |
| -rwxr-xr-x | training/dtrain/parallelize.rb | 7 |
| -rw-r--r-- | training/dtrain/score.h | 17 |
15 files changed, 209 insertions(+), 197 deletions(-)
````diff
diff --git a/training/dtrain/README.md b/training/dtrain/README.md
index 2ab2f232..2bae6b48 100644
--- a/training/dtrain/README.md
+++ b/training/dtrain/README.md
@@ -17,6 +17,17 @@ To build only parts needed for dtrain do
   cd training/dtrain/; make
 ```
+Ideas
+-----
+ * get approx_bleu to work?
+ * implement minibatches (Minibatch and Parallelization for Online Large Margin Structured Learning)
+ * learning rate 1/T?
+ * use an oracle? mira-like (model vs. BLEU), feature repr. of reference!?
+ * implement lc_bleu properly
+ * merge kbest lists of previous epochs (as MERT does)
+ * ``walk entire regularization path''
+ * rerank after each update?
+
 
 Running
 -------
 See directories under test/ .
````
````diff
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 149f87d4..0ee2f124 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -1,4 +1,10 @@
 #include "dtrain.h"
+#include "score.h"
+#include "kbestget.h"
+#include "ksampler.h"
+#include "pairsampling.h"
+
+using namespace dtrain;
 
 
 bool
@@ -138,23 +144,23 @@ main(int argc, char** argv)
   string scorer_str = cfg["scorer"].as<string>();
   LocalScorer* scorer;
   if (scorer_str == "bleu") {
-    scorer = dynamic_cast<BleuScorer*>(new BleuScorer);
+    scorer = static_cast<BleuScorer*>(new BleuScorer);
   } else if (scorer_str == "stupid_bleu") {
-    scorer = dynamic_cast<StupidBleuScorer*>(new StupidBleuScorer);
+    scorer = static_cast<StupidBleuScorer*>(new StupidBleuScorer);
   } else if (scorer_str == "fixed_stupid_bleu") {
-    scorer = dynamic_cast<FixedStupidBleuScorer*>(new FixedStupidBleuScorer);
+    scorer = static_cast<FixedStupidBleuScorer*>(new FixedStupidBleuScorer);
   } else if (scorer_str == "smooth_bleu") {
-    scorer = dynamic_cast<SmoothBleuScorer*>(new SmoothBleuScorer);
+    scorer = static_cast<SmoothBleuScorer*>(new SmoothBleuScorer);
   } else if (scorer_str == "sum_bleu") {
-    scorer = dynamic_cast<SumBleuScorer*>(new SumBleuScorer);
+    scorer = static_cast<SumBleuScorer*>(new SumBleuScorer);
   } else if (scorer_str == "sumexp_bleu") {
-    scorer = dynamic_cast<SumExpBleuScorer*>(new SumExpBleuScorer);
+    scorer = static_cast<SumExpBleuScorer*>(new SumExpBleuScorer);
   } else if (scorer_str == "sumwhatever_bleu") {
-    scorer = dynamic_cast<SumWhateverBleuScorer*>(new SumWhateverBleuScorer);
+    scorer = static_cast<SumWhateverBleuScorer*>(new SumWhateverBleuScorer);
   } else if (scorer_str == "approx_bleu") {
-    scorer = dynamic_cast<ApproxBleuScorer*>(new ApproxBleuScorer(N, approx_bleu_d));
+    scorer = static_cast<ApproxBleuScorer*>(new ApproxBleuScorer(N, approx_bleu_d));
   } else if (scorer_str == "lc_bleu") {
-    scorer = dynamic_cast<LinearBleuScorer*>(new LinearBleuScorer(N));
+    scorer = static_cast<LinearBleuScorer*>(new LinearBleuScorer(N));
   } else {
     cerr << "Don't know scoring metric: '" << scorer_str << "', exiting." << endl;
     exit(1);
@@ -166,9 +172,9 @@ main(int argc, char** argv)
   MT19937 rng; // random number generator, only for forest sampling
   HypSampler* observer;
   if (sample_from == "kbest")
-    observer = dynamic_cast<KBestGetter*>(new KBestGetter(k, filter_type));
+    observer = static_cast<KBestGetter*>(new KBestGetter(k, filter_type));
   else
-    observer = dynamic_cast<KSampler*>(new KSampler(k, &rng));
+    observer = static_cast<KSampler*>(new KSampler(k, &rng));
   observer->SetScorer(scorer);
 
   // init weights
@@ -360,6 +366,9 @@ main(int argc, char** argv)
         PROsampling(samples, pairs, pair_threshold, max_pairs);
       npairs += pairs.size();
 
+      SparseVector<weight_t> lambdas_copy;
+      if (l1naive||l1clip||l1cumul) lambdas_copy = lambdas;
+
       for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
            it != pairs.end(); it++) {
         bool rank_error;
@@ -369,7 +378,7 @@ main(int argc, char** argv)
           margin = std::numeric_limits<float>::max();
         } else {
           rank_error = it->first.model <= it->second.model;
-          margin = fabs(fabs(it->first.model) - fabs(it->second.model));
+          margin = fabs(it->first.model - it->second.model);
           if (!rank_error && margin < loss_margin) margin_violations++;
         }
         if (rank_error) rank_errors++;
@@ -383,23 +392,26 @@ main(int argc, char** argv)
       }
 
       // l1 regularization
-      // please note that this penalizes _all_ weights
-      // (contrary to only the ones changed by the last update)
-      // after a _sentence_ (not after each example/pair)
+      // please note that this regularizations happen
+      // after a _sentence_ -- not after each example/pair!
      if (l1naive) {
         FastSparseVector<weight_t>::iterator it = lambdas.begin();
         for (; it != lambdas.end(); ++it) {
-          it->second -= sign(it->second) * l1_reg;
+          if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
+            it->second -= sign(it->second) * l1_reg;
+          }
         }
       } else if (l1clip) {
         FastSparseVector<weight_t>::iterator it = lambdas.begin();
         for (; it != lambdas.end(); ++it) {
-          if (it->second != 0) {
-            weight_t v = it->second;
-            if (v > 0) {
-              it->second = max(0., v - l1_reg);
-            } else {
-              it->second = min(0., v + l1_reg);
+          if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
+            if (it->second != 0) {
+              weight_t v = it->second;
+              if (v > 0) {
+                it->second = max(0., v - l1_reg);
+              } else {
+                it->second = min(0., v + l1_reg);
+              }
             }
           }
         }
@@ -407,16 +419,18 @@ main(int argc, char** argv)
         weight_t acc_penalty = (ii+1) * l1_reg; // ii is the index of the current input
         FastSparseVector<weight_t>::iterator it = lambdas.begin();
         for (; it != lambdas.end(); ++it) {
-          if (it->second != 0) {
-            weight_t v = it->second;
-            weight_t penalized = 0.;
-            if (v > 0) {
-              penalized = max(0., v-(acc_penalty + cumulative_penalties.get(it->first)));
-            } else {
-              penalized = min(0., v+(acc_penalty - cumulative_penalties.get(it->first)));
+          if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
+            if (it->second != 0) {
+              weight_t v = it->second;
+              weight_t penalized = 0.;
+              if (v > 0) {
+                penalized = max(0., v-(acc_penalty + cumulative_penalties.get(it->first)));
+              } else {
+                penalized = min(0., v+(acc_penalty - cumulative_penalties.get(it->first)));
+              }
+              it->second = penalized;
+              cumulative_penalties.set_value(it->first, cumulative_penalties.get(it->first)+penalized);
             }
-            it->second = penalized;
-            cumulative_penalties.set_value(it->first, cumulative_penalties.get(it->first)+penalized);
           }
         }
       }
````
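Two details in the `dtrain.cc` changes above are worth a note. The replaced `dynamic_cast`s were applied to freshly constructed objects of exactly the named type, so the checked runtime cast added RTTI cost without adding any safety; `static_cast` changes nothing semantically. The `lambdas_copy` snapshot restricts the subsequent L1 penalty to weights actually touched by the current update. Finally, the margin fix matters whenever the two model scores differ in sign; the standalone sketch below (not repository code, scores are made up) shows how the old expression understates the gap.

```cpp
#include <cassert>
#include <cmath>

int main() {
  // Hypothetical pair of model scores with opposite signs.
  double first = 2.0, second = -3.0;  // true gap: 5.0
  double old_margin = std::fabs(std::fabs(first) - std::fabs(second));
  double new_margin = std::fabs(first - second);
  assert(old_margin == 1.0);  // old expression: | |2| - |-3| | = 1
  assert(new_margin == 5.0);  // fixed expression: |2 - (-3)| = 5
  return 0;
}
```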
````diff
diff --git a/training/dtrain/dtrain.h b/training/dtrain/dtrain.h
index eb0b9f17..3981fb39 100644
--- a/training/dtrain/dtrain.h
+++ b/training/dtrain/dtrain.h
@@ -11,16 +11,19 @@
 #include <boost/algorithm/string.hpp>
 #include <boost/program_options.hpp>
 
-#include "ksampler.h"
-#include "pairsampling.h"
-
-#include "filelib.h"
-
+#include "decoder.h"
+#include "ff_register.h"
+#include "sentence_metadata.h"
+#include "verbose.h"
+#include "viterbi.h"
 
 using namespace std;
-using namespace dtrain;
 namespace po = boost::program_options;
 
+namespace dtrain
+{
+
+
 inline void register_and_convert(const vector<string>& strs, vector<WordID>& ids)
 {
   vector<string>::const_iterator it;
@@ -42,17 +45,55 @@ inline string gettmpf(const string path, const string infix)
   return string(fn);
 }
 
-inline void split_in(string& s, vector<string>& parts)
+typedef double score_t;
+
+struct ScoredHyp
 {
-  unsigned f = 0;
-  for(unsigned i = 0; i < 3; i++) {
-    unsigned e = f;
-    f = s.find("\t", f+1);
-    if (e != 0) parts.push_back(s.substr(e+1, f-e-1));
-    else parts.push_back(s.substr(0, f));
+  vector<WordID> w;
+  SparseVector<double> f;
+  score_t model;
+  score_t score;
+  unsigned rank;
+};
+
+struct LocalScorer
+{
+  unsigned N_;
+  vector<score_t> w_;
+
+  virtual score_t
+  Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned src_len)=0;
+
+  virtual void Reset() {} // only for ApproxBleuScorer, LinearBleuScorer
+
+  inline void
+  Init(unsigned N, vector<score_t> weights)
+  {
+    assert(N > 0);
+    N_ = N;
+    if (weights.empty()) for (unsigned i = 0; i < N_; i++) w_.push_back(1./N_);
+    else w_ = weights;
   }
-  s.erase(0, f+1);
-}
+
+  inline score_t
+  brevity_penalty(const unsigned hyp_len, const unsigned ref_len)
+  {
+    if (hyp_len > ref_len) return 1;
+    return exp(1 - (score_t)ref_len/hyp_len);
+  }
+};
+
+struct HypSampler : public DecoderObserver
+{
+  LocalScorer* scorer_;
+  vector<WordID>* ref_;
+  unsigned f_count_, sz_;
+  virtual vector<ScoredHyp>* GetSamples()=0;
+  inline void SetScorer(LocalScorer* scorer) { scorer_ = scorer; }
+  inline void SetRef(vector<WordID>& ref) { ref_ = &ref; }
+  inline unsigned get_f_count() { return f_count_; }
+  inline unsigned get_sz() { return sz_; }
+};
 
 struct HSReporter
 {
@@ -88,5 +129,8 @@ inline T sign(T z)
   return z < 0 ? -1 : +1;
 }
 
+
+} // namespace
+
 
 #endif
````
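The `brevity_penalty` that moved into `dtrain.h` with `LocalScorer` is the usual BLEU brevity penalty: no discount when the hypothesis is longer than the reference, and an `exp(1 - ref_len/hyp_len)` discount otherwise. A minimal standalone check of the same formula (assumed lengths, not repository code):

```cpp
#include <cmath>
#include <cstdio>

// Same formula as LocalScorer::brevity_penalty in dtrain.h.
double brevity_penalty(unsigned hyp_len, unsigned ref_len) {
  if (hyp_len > ref_len) return 1;
  return std::exp(1 - (double)ref_len / hyp_len);
}

int main() {
  std::printf("%f\n", brevity_penalty(12, 10)); // 1.000000: longer than reference
  std::printf("%f\n", brevity_penalty(10, 10)); // 1.000000: exp(0) at the boundary
  std::printf("%f\n", brevity_penalty(5, 10));  // 0.367879: exp(-1)
  return 0;
}
```

Note that the header's formula returns 1 only for strictly longer hypotheses, but equal lengths give `exp(0) = 1` as well, so the two branches agree at the boundary.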
````diff
diff --git a/training/dtrain/examples/parallelized/cdec.ini b/training/dtrain/examples/parallelized/cdec.ini
index e43ba1c4..5773029a 100644
--- a/training/dtrain/examples/parallelized/cdec.ini
+++ b/training/dtrain/examples/parallelized/cdec.ini
@@ -4,7 +4,7 @@ intersection_strategy=cube_pruning
 cubepruning_pop_limit=200
 scfg_max_span_limit=15
 feature_function=WordPenalty
-feature_function=KLanguageModel ../example/nc-wmt11.en.srilm.gz
+feature_function=KLanguageModel ../standard//nc-wmt11.en.srilm.gz
 #feature_function=ArityPenalty
 #feature_function=CMR2008ReorderingFeatures
 #feature_function=Dwarf
diff --git a/training/dtrain/examples/parallelized/dtrain.ini b/training/dtrain/examples/parallelized/dtrain.ini
index f19ef891..0b0932d6 100644
--- a/training/dtrain/examples/parallelized/dtrain.ini
+++ b/training/dtrain/examples/parallelized/dtrain.ini
@@ -11,6 +11,4 @@ pair_sampling=XYX
 hi_lo=0.1
 select_weights=last
 print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-# newer version of the grammar extractor use different feature names:
-#print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
 decoder_config=cdec.ini
diff --git a/training/dtrain/examples/parallelized/work/out.0.0 b/training/dtrain/examples/parallelized/work/out.0.0
index 7a00ed0f..c559dd4d 100644
--- a/training/dtrain/examples/parallelized/work/out.0.0
+++ b/training/dtrain/examples/parallelized/work/out.0.0
@@ -1,9 +1,9 @@
                 cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard//nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 3121929377
+Seeding random number sequence to 405292278
 
 dtrain
 Parameters:
@@ -16,6 +16,7 @@ Parameters:
            learning rate 0.0001
                    gamma 0
              loss margin 1
+       faster perceptron 0
                    pairs 'XYX'
                    hi lo 0.1
           pair threshold 0
@@ -51,11 +52,11 @@ WEIGHTS
     non0 feature count: 12
            avg list sz: 100
            avg f count: 11.32
-(time 0.37 min, 4.4 s/S)
+(time 0.35 min, 4.2 s/S)
 
 Writing weights file to 'work/weights.0.0' ...
 done
 
 ---
 Best iteration: 1 [SCORE 'stupid_bleu'=0.17521].
-This took 0.36667 min.
+This took 0.35 min.
````
````diff
diff --git a/training/dtrain/examples/parallelized/work/out.0.1 b/training/dtrain/examples/parallelized/work/out.0.1
index e2bd6649..8bc7ea9c 100644
--- a/training/dtrain/examples/parallelized/work/out.0.1
+++ b/training/dtrain/examples/parallelized/work/out.0.1
@@ -1,9 +1,9 @@
                 cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard//nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 2767202922
+Seeding random number sequence to 43859692
 
 dtrain
 Parameters:
@@ -16,6 +16,7 @@ Parameters:
            learning rate 0.0001
                    gamma 0
              loss margin 1
+       faster perceptron 0
                    pairs 'XYX'
                    hi lo 0.1
           pair threshold 0
@@ -52,11 +53,11 @@ WEIGHTS
     non0 feature count: 12
            avg list sz: 100
            avg f count: 10.496
-(time 0.32 min, 3.8 s/S)
+(time 0.35 min, 4.2 s/S)
 
 Writing weights file to 'work/weights.0.1' ...
 done
 
 ---
 Best iteration: 1 [SCORE 'stupid_bleu'=0.26638].
-This took 0.31667 min.
+This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.0 b/training/dtrain/examples/parallelized/work/out.1.0
index 6e790e38..65d1e7dc 100644
--- a/training/dtrain/examples/parallelized/work/out.1.0
+++ b/training/dtrain/examples/parallelized/work/out.1.0
@@ -1,9 +1,9 @@
                 cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard//nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 1432415010
+Seeding random number sequence to 4126799437
 
 dtrain
 Parameters:
@@ -16,6 +16,7 @@ Parameters:
            learning rate 0.0001
                    gamma 0
              loss margin 1
+       faster perceptron 0
                    pairs 'XYX'
                    hi lo 0.1
           pair threshold 0
@@ -51,11 +52,11 @@ WEIGHTS
     non0 feature count: 11
            avg list sz: 100
            avg f count: 11.814
-(time 0.45 min, 5.4 s/S)
+(time 0.43 min, 5.2 s/S)
 
 Writing weights file to 'work/weights.1.0' ...
 done
 
 ---
 Best iteration: 1 [SCORE 'stupid_bleu'=0.10863].
-This took 0.45 min.
+This took 0.43333 min.
````
````diff
diff --git a/training/dtrain/examples/parallelized/work/out.1.1 b/training/dtrain/examples/parallelized/work/out.1.1
index 0b984761..f479fbbc 100644
--- a/training/dtrain/examples/parallelized/work/out.1.1
+++ b/training/dtrain/examples/parallelized/work/out.1.1
@@ -1,9 +1,9 @@
                 cdec cfg 'cdec.ini'
 Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard//nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
-Seeding random number sequence to 1771918374
+Seeding random number sequence to 2112412848
 
 dtrain
 Parameters:
@@ -16,6 +16,7 @@ Parameters:
            learning rate 0.0001
                    gamma 0
              loss margin 1
+       faster perceptron 0
                    pairs 'XYX'
                    hi lo 0.1
           pair threshold 0
@@ -52,11 +53,11 @@ WEIGHTS
     non0 feature count: 12
            avg list sz: 100
            avg f count: 11.224
-(time 0.42 min, 5 s/S)
+(time 0.45 min, 5.4 s/S)
 
 Writing weights file to 'work/weights.1.1' ...
 done
 
 ---
 Best iteration: 1 [SCORE 'stupid_bleu'=0.13169].
-This took 0.41667 min.
+This took 0.45 min.
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index e1072d30..23e94285 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -10,15 +10,15 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
 stop_after=10 # stop epoch after 10 inputs
 
 # interesting stuff
-epochs=2                # run over input 2 times
-k=100                   # use 100best lists
-N=4                     # optimize (approx) BLEU4
-scorer=stupid_bleu      # use 'stupid' BLEU+1
-learning_rate=1.0       # learning rate, don't care if gamma=0 (perceptron)
-gamma=0                 # use SVM reg
-sample_from=kbest       # use kbest lists (as opposed to forest)
-filter=uniq             # only unique entries in kbest (surface form)
-pair_sampling=XYX       #
-hi_lo=0.1               # 10 vs 80 vs 10 and 80 vs 10 here
-pair_threshold=0        # minimum distance in BLEU (here: > 0)
-loss_margin=0           # update if correctly ranked, but within this margin
+epochs=2                 # run over input 2 times
+k=100                    # use 100best lists
+N=4                      # optimize (approx) BLEU4
+scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
+learning_rate=1.0        # learning rate, don't care if gamma=0 (perceptron)
+gamma=0                  # use SVM reg
+sample_from=kbest        # use kbest lists (as opposed to forest)
+filter=uniq              # only unique entries in kbest (surface form)
+pair_sampling=XYX        #
+hi_lo=0.1                # 10 vs 80 vs 10 and 80 vs 10 here
+pair_threshold=0         # minimum distance in BLEU (here: > 0)
+loss_margin=0            # update if correctly ranked, but within this margin
````
````diff
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index 7cd09dbf..21f91244 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,14 +4,14 @@ Reading ./nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
   Example feature: Shape_S00000_T00000
-Seeding random number sequence to 2679584485
+Seeding random number sequence to 970626287
 
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 2
-                  scorer 'stupid_bleu'
+                  scorer 'fixed_stupid_bleu'
             sample from 'kbest'
                  filter 'uniq'
           learning rate 1
@@ -34,58 +34,58 @@ Iteration #1 of 2.
 . 10
 Stopping after 10 input sentences.
 WEIGHTS
-              Glue = -576
-       WordPenalty = +417.79
-     LanguageModel = +5117.5
- LanguageModel_OOV = -1307
-     PhraseModel_0 = -1612
-     PhraseModel_1 = -2159.6
-     PhraseModel_2 = -677.36
-     PhraseModel_3 = +2663.8
-     PhraseModel_4 = -1025.9
-     PhraseModel_5 = -8
-     PhraseModel_6 = +70
-       PassThrough = -1455
+              Glue = -614
+       WordPenalty = +1256.8
+     LanguageModel = +5610.5
+ LanguageModel_OOV = -1449
+     PhraseModel_0 = -2107
+     PhraseModel_1 = -4666.1
+     PhraseModel_2 = -2713.5
+     PhraseModel_3 = +4204.3
+     PhraseModel_4 = -1435.8
+     PhraseModel_5 = +916
+     PhraseModel_6 = +190
+       PassThrough = -2527
         ---
-       1best avg score: 0.27697 (+0.27697)
- 1best avg model score: -47918 (-47918)
-           avg # pairs: 581.9 (meaningless)
-        avg # rank err: 581.9
+       1best avg score: 0.17874 (+0.17874)
+ 1best avg model score: 88399 (+88399)
+           avg # pairs: 798.2 (meaningless)
+        avg # rank err: 798.2
      avg # margin viol: 0
-    non0 feature count: 703
-           avg list sz: 90.9
-           avg f count: 100.09
-(time 0.25 min, 1.5 s/S)
+    non0 feature count: 887
+           avg list sz: 91.3
+           avg f count: 126.85
+(time 0.33 min, 2 s/S)
 
 Iteration #2 of 2.
 . 10
 WEIGHTS
-              Glue = -622
-       WordPenalty = +898.56
-     LanguageModel = +8066.2
- LanguageModel_OOV = -2590
-     PhraseModel_0 = -4335.8
-     PhraseModel_1 = -5864.4
-     PhraseModel_2 = -1729.8
-     PhraseModel_3 = +2831.9
-     PhraseModel_4 = -5384.8
-     PhraseModel_5 = +1449
-     PhraseModel_6 = +480
-       PassThrough = -2578
+              Glue = -1025
+       WordPenalty = +1751.5
+     LanguageModel = +10059
+ LanguageModel_OOV = -4490
+     PhraseModel_0 = -2640.7
+     PhraseModel_1 = -3757.4
+     PhraseModel_2 = -1133.1
+     PhraseModel_3 = +1837.3
+     PhraseModel_4 = -3534.3
+     PhraseModel_5 = +2308
+     PhraseModel_6 = +1677
+       PassThrough = -6222
        ---
-       1best avg score: 0.37119 (+0.094226)
- 1best avg model score: -1.3174e+05 (-83822)
-           avg # pairs: 584.1 (meaningless)
-        avg # rank err: 584.1
+       1best avg score: 0.30764 (+0.12891)
+ 1best avg model score: -2.5042e+05 (-3.3882e+05)
+           avg # pairs: 725.9 (meaningless)
+        avg # rank err: 725.9
      avg # margin viol: 0
-    non0 feature count: 1115
+    non0 feature count: 1499
            avg list sz: 91.3
-           avg f count: 90.755
-(time 0.3 min, 1.8 s/S)
+           avg f count: 114.34
+(time 0.32 min, 1.9 s/S)
 
 Writing weights file to '-' ...
 done
 
 ---
-Best iteration: 2 [SCORE 'stupid_bleu'=0.37119].
-This took 0.55 min.
+Best iteration: 2 [SCORE 'fixed_stupid_bleu'=0.30764].
+This took 0.65 min.
````
````diff
diff --git a/training/dtrain/kbestget.h b/training/dtrain/kbestget.h
index dd8882e1..85252db3 100644
--- a/training/dtrain/kbestget.h
+++ b/training/dtrain/kbestget.h
@@ -1,76 +1,12 @@
 #ifndef _DTRAIN_KBESTGET_H_
 #define _DTRAIN_KBESTGET_H_
 
-#include "kbest.h" // cdec
-#include "sentence_metadata.h"
-
-#include "verbose.h"
-#include "viterbi.h"
-#include "ff_register.h"
-#include "decoder.h"
-#include "weights.h"
-#include "logval.h"
-
-using namespace std;
+#include "kbest.h"
 
 namespace dtrain
 {
 
-typedef double score_t;
-
-struct ScoredHyp
-{
-  vector<WordID> w;
-  SparseVector<double> f;
-  score_t model;
-  score_t score;
-  unsigned rank;
-};
-
-struct LocalScorer
-{
-  unsigned N_;
-  vector<score_t> w_;
-
-  virtual score_t
-  Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned src_len)=0;
-
-  void Reset() {} // only for approx bleu
-
-  inline void
-  Init(unsigned N, vector<score_t> weights)
-  {
-    assert(N > 0);
-    N_ = N;
-    if (weights.empty()) for (unsigned i = 0; i < N_; i++) w_.push_back(1./N_);
-    else w_ = weights;
-  }
-
-  inline score_t
-  brevity_penalty(const unsigned hyp_len, const unsigned ref_len)
-  {
-    if (hyp_len > ref_len) return 1;
-    return exp(1 - (score_t)ref_len/hyp_len);
-  }
-};
-
-struct HypSampler : public DecoderObserver
-{
-  LocalScorer* scorer_;
-  vector<WordID>* ref_;
-  unsigned f_count_, sz_;
-  virtual vector<ScoredHyp>* GetSamples()=0;
-  inline void SetScorer(LocalScorer* scorer) { scorer_ = scorer; }
-  inline void SetRef(vector<WordID>& ref) { ref_ = &ref; }
-  inline unsigned get_f_count() { return f_count_; }
-  inline unsigned get_sz() { return sz_; }
-};
-////////////////////////////////////////////////////////////////////////////////
-
-
-
-
 
 struct KBestGetter : public HypSampler
 {
   const unsigned k_;
diff --git a/training/dtrain/ksampler.h b/training/dtrain/ksampler.h
index bc2f56cd..29dab667 100644
--- a/training/dtrain/ksampler.h
+++ b/training/dtrain/ksampler.h
@@ -1,13 +1,12 @@
 #ifndef _DTRAIN_KSAMPLER_H_
 #define _DTRAIN_KSAMPLER_H_
 
-#include "hg_sampler.h" // cdec
-#include "kbestget.h"
-#include "score.h"
+#include "hg_sampler.h"
 
 namespace dtrain
 {
 
+
 bool
 cmp_hyp_by_model_d(ScoredHyp a, ScoredHyp b)
 {
````
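The net effect of the `kbestget.h` and `ksampler.h` changes is a flattened include graph: the shared pieces (`score_t`, `ScoredHyp`, `LocalScorer`, `HypSampler`) now live in `dtrain.h`, and each sampler header pulls in only the one cdec header it really needs. A rough sketch of the resulting layout (simplified, not the verbatim headers):

```cpp
// dtrain.h -- the shared types now live here, included by everything below.
namespace dtrain {

typedef double score_t;

struct ScoredHyp { /* words, features, model score, metric score, rank */ };

struct LocalScorer {
  virtual score_t Score() = 0; // real signature takes hyp/ref/rank/src_len
  virtual void Reset() {}      // default no-op; ApproxBleu/LinearBleu override
};

} // namespace

// kbestget.h -- used to define ScoredHyp/LocalScorer/HypSampler itself;
//               now it only adds KBestGetter on top of cdec's kbest.h.
// ksampler.h -- likewise shrinks to hg_sampler.h plus KSampler.
// score.h    -- includes dtrain.h instead of kbestget.h, so the metric
//               implementations no longer depend on the k-best machinery.
```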
qsub_str_start = "qsub -cwd -sync y -b y -j y -o work/out.#{shard}.#{epoch} -N dtrain.#{shard}.#{epoch} \"" +        qsub_str_start = "qsub #{opts[:extra_qsub]} -cwd -sync y -b y -j y -o work/out.#{shard}.#{epoch} -N dtrain.#{shard}.#{epoch} \""          qsub_str_end = "\""          local_end = ''        else -        local_end = "&>work/out.#{shard}.#{epoch}" +        local_end = "2>work/out.#{shard}.#{epoch}"        end        pids << Kernel.fork {          `#{qsub_str_start}#{dtrain_bin} -c #{ini}\ diff --git a/training/dtrain/score.h b/training/dtrain/score.h index bddaa071..53e970ba 100644 --- a/training/dtrain/score.h +++ b/training/dtrain/score.h @@ -1,9 +1,7 @@  #ifndef _DTRAIN_SCORE_H_  #define _DTRAIN_SCORE_H_ -#include "kbestget.h" - -using namespace std; +#include "dtrain.h"  namespace dtrain  { @@ -141,36 +139,43 @@ struct BleuScorer : public LocalScorer  {    score_t Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len);    score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); +  void Reset() {}  };  struct StupidBleuScorer : public LocalScorer  {    score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); +  void Reset() {}  };  struct FixedStupidBleuScorer : public LocalScorer  {    score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); +  void Reset() {}  };  struct SmoothBleuScorer : public LocalScorer  {    score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); +  void Reset() {}  };  struct SumBleuScorer : public LocalScorer  { -   score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); +  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); +  void Reset() {}  };  struct SumExpBleuScorer : public LocalScorer  { -   score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); +  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); +  void Reset() {}  };  struct SumWhateverBleuScorer : public LocalScorer  { -   score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); +  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/); +  void Reset() {};  };  struct ApproxBleuScorer : public BleuScorer | 
