author     Patrick Simianer <simianer@cl.uni-heidelberg.de>  2013-10-08 13:57:45 +0200
committer  Patrick Simianer <simianer@cl.uni-heidelberg.de>  2013-10-08 13:57:45 +0200
commit     5fc77937dde48dddde264261cb773b07fe7cd560
tree       561d7f8e830e0c71c6bf89504a986a564b2da691
parent     e11bbf4e4afb3e90710e45eb5cc7dff89bb559bc
dtrain: added pclr variants and new expected-output; fixed bug in soft syntax features
-rw-r--r--  decoder/ff_soft_syntax.cc                           2
-rw-r--r--  training/dtrain/dtrain.cc                          31
-rw-r--r--  training/dtrain/examples/standard/dtrain.ini        6
-rw-r--r--  training/dtrain/examples/standard/expected-output  115
-rwxr-xr-x  training/dtrain/parallelize.rb                     11
5 files changed, 101 insertions, 64 deletions
diff --git a/decoder/ff_soft_syntax.cc b/decoder/ff_soft_syntax.cc
index 9981fa45..d84f2e6d 100644
--- a/decoder/ff_soft_syntax.cc
+++ b/decoder/ff_soft_syntax.cc
@@ -21,7 +21,7 @@ using namespace std;
 struct SoftSyntacticFeaturesImpl {
   SoftSyntacticFeaturesImpl(const string& param) {
     vector<string> labels = SplitOnWhitespace(param);
-    for (unsigned int i = 0; i < labels.size(); i++)
+    //for (unsigned int i = 0; i < labels.size(); i++)
       //cerr << "Labels: " << labels.at(i) << endl;
     for (unsigned int i = 0; i < labels.size(); i++) {
       string label = labels.at(i);
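The soft-syntax fix above is easy to misread in diff form. In the old code the first for-loop had no braces, and the only line following it was commented out; since a comment is not a statement, the loop's body silently became the second for-loop, which then ran labels.size() times too often. Commenting out the first loop header, as the commit does, restores a single pass. A minimal sketch of the pitfall, with toy values that are not from the repository:

#include <iostream>
#include <vector>
using namespace std;

int main() {
  vector<int> labels(3);
  int runs = 0;
  // The commented-out cerr line below is not a statement, so this
  // braceless for-loop takes the next statement as its body: the
  // second loop becomes nested and runs labels.size()^2 times.
  for (unsigned int i = 0; i < labels.size(); i++)
    //cerr << "Labels: " << labels.at(i) << endl;
    for (unsigned int i = 0; i < labels.size(); i++)
      runs++;
  cout << runs << endl; // prints 9, not 3
  return 0;
}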
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 9d60a903..38a9b69a 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -40,7 +40,7 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
     ("scale_bleu_diff", po::value<bool>()->zero_tokens(), "learning rate <- bleu diff of a misranked pair")
     ("loss_margin", po::value<weight_t>()->default_value(0.), "update if no error in pref pair but model scores this near")
     ("max_pairs", po::value<unsigned>()->default_value(std::numeric_limits<unsigned>::max()), "max. # of pairs per Sent.")
-    ("pclr", po::value<bool>()->zero_tokens(), "use a (simple) per-coordinate learning rate")
+    ("pclr", po::value<string>()->default_value("no"), "use a (simple|adagrad) per-coordinate learning rate")
     ("noup", po::value<bool>()->zero_tokens(), "do not update weights");
   po::options_description cl("Command Line Options");
   cl.add_options()
@@ -125,8 +125,7 @@ main(int argc, char** argv)
   if (loss_margin > 9998.) loss_margin = std::numeric_limits<float>::max();
   bool scale_bleu_diff = false;
   if (cfg.count("scale_bleu_diff")) scale_bleu_diff = true;
-  bool pclr = false;
-  if (cfg.count("pclr")) pclr = true;
+  const string pclr = cfg["pclr"].as<string>();
   bool average = false;
   if (select_weights == "avg")
     average = true;
@@ -190,7 +189,6 @@ main(int argc, char** argv)
   weight_t gamma = cfg["gamma"].as<weight_t>();

   // faster perceptron: consider only misranked pairs, see
-  // DO NOT ENABLE WITH SVM (gamma > 0) OR loss_margin!
   bool faster_perceptron = false;
   if (gamma==0 && loss_margin==0) faster_perceptron = true;
@@ -251,8 +249,7 @@ main(int argc, char** argv)
     cerr << setw(25) << "l1 reg " << l1_reg << " '" << cfg["l1_reg"].as<string>() << "'" << endl;
     if (rescale)
       cerr << setw(25) << "rescale " << rescale << endl;
-    if (pclr)
-      cerr << setw(25) << "pclr " << pclr << endl;
+    cerr << setw(25) << "pclr " << pclr << endl;
     cerr << setw(25) << "max pairs " << max_pairs << endl;
     cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl;
     cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
@@ -392,22 +389,30 @@ main(int argc, char** argv)
         if (scale_bleu_diff) eta = it->first.score - it->second.score;
         if (rank_error || margin < loss_margin) {
           SparseVector<weight_t> diff_vec = it->first.f - it->second.f;
-          if (pclr) {
+          if (pclr != "no") {
             sum_up += diff_vec;
           } else {
             lambdas.plus_eq_v_times_s(diff_vec, eta);
+            if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs)); // FIXME
           }
-          if (gamma)
-            lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs));
         }
       }

       // per-coordinate learning rate
-      if (pclr) {
+      if (pclr != "no") {
         SparseVector<weight_t>::iterator it = sum_up.begin();
-        for (; it != lambdas.end(); ++it) {
-          learning_rates[it->first]++;
-          lambdas[it->first] += it->second / learning_rates[it->first]; //* max(0.00000001, eta/(eta+learning_rates[it->first]));
+        for (; it != sum_up.end(); ++it) {
+          if (pclr == "simple") {
+            lambdas[it->first] += it->second / max(1.0, learning_rates[it->first]);
+            learning_rates[it->first]++;
+          } else if (pclr == "adagrad") {
+            if (learning_rates[it->first] == 0) {
+              lambdas[it->first] += it->second * eta;
+            } else {
+              lambdas[it->first] += it->second * eta * learning_rates[it->first];
+            }
+            learning_rates[it->first] += pow(it->second, 2.0);
+          }
         }
       }
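The two pclr variants added above amount to different per-feature step sizes applied to the summed difference vector. The following self-contained sketch restates the update rules outside of dtrain; std::map stands in for cdec's SparseVector, and all names and numbers are illustrative assumptions, not repository code. Note that the 'adagrad' branch mirrors the committed code, which multiplies the step by the accumulated squared gradient; textbook AdaGrad would instead divide by the square root of that accumulator, damping frequently-updated features.

#include <algorithm>
#include <cmath>
#include <iostream>
#include <map>
#include <string>

int main()
{
  std::map<int, double> lambdas;        // feature weights
  std::map<int, double> learning_rates; // per-feature statistics
  std::map<int, double> sum_up;         // summed difference vectors of one round
  sum_up[0] = 0.5;                      // toy gradient for feature 0
  sum_up[1] = -1.5;                     // toy gradient for feature 1
  const double eta = 1.0;               // global learning rate
  const std::string pclr = "adagrad";   // or "simple"

  for (std::map<int, double>::iterator it = sum_up.begin();
       it != sum_up.end(); ++it) {
    if (pclr == "simple") {
      // "simple": per-feature 1/t decay, where learning_rates counts
      // how often each feature has been updated so far
      lambdas[it->first] += it->second / std::max(1.0, learning_rates[it->first]);
      learning_rates[it->first]++;
    } else if (pclr == "adagrad") {
      // "adagrad": learning_rates accumulates squared gradients; this
      // multiplies by the accumulator, as the committed code does
      if (learning_rates[it->first] == 0)
        lambdas[it->first] += it->second * eta;
      else
        lambdas[it->first] += it->second * eta * learning_rates[it->first];
      learning_rates[it->first] += std::pow(it->second, 2.0);
    }
  }

  for (std::map<int, double>::iterator it = lambdas.begin();
       it != lambdas.end(); ++it)
    std::cout << "feature " << it->first << " -> " << it->second << std::endl;
  return 0;
}

One consequence visible in the sketch: under the committed 'adagrad' rule, features with large accumulated squared gradients take ever larger steps, the opposite of the usual AdaGrad damping, which may matter when choosing between 'simple' and 'adagrad'.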
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index c0912a62..e6d6382e 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,6 +1,6 @@
 input=./nc-wmt11.de.gz
 refs=./nc-wmt11.en.gz
-output=asdf              # a weights file (add .gz for gzip compression) or STDOUT '-'
+output=-                 # a weights file (add .gz for gzip compression) or STDOUT '-'
 select_weights=VOID      # output average (over epochs) weight vector
 decoder_config=./cdec.ini # config for cdec
 # weights for these features will be printed on each iteration
@@ -10,11 +10,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
 stop_after=10            # stop epoch after 10 inputs

 # interesting stuff
-epochs=2                 # run over input 2 times
+epochs=3                 # run over input 3 times
 k=100                    # use 100best lists
 N=4                      # optimize (approx) BLEU4
 scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=1.0        # learning rate, don't care if gamma=0 (perceptron)
+learning_rate=1.0        # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
 gamma=0                  # use SVM reg
 sample_from=kbest        # use kbest lists (as opposed to forest)
 filter=uniq              # only unique entries in kbest (surface form)
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index 21f91244..a35bbe6f 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,13 +4,13 @@ Reading ./nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
  Example feature: Shape_S00000_T00000
-Seeding random number sequence to 970626287
+Seeding random number sequence to 4049211323

 dtrain
 Parameters:
                        k 100
                        N 4
-                       T 2
+                       T 3
                   scorer 'fixed_stupid_bleu'
              sample from 'kbest'
                   filter 'uniq'
@@ -23,6 +23,7 @@ Parameters:
           pair threshold 0
           select weights 'VOID'
                   l1 reg 0 'none'
+                    pclr no
                max pairs 4294967295
                 cdec cfg './cdec.ini'
                    input './nc-wmt11.de.gz'
@@ -30,62 +31,88 @@ Parameters:
                   output '-'
               stop_after 10
 (a dot represents 10 inputs)
-Iteration #1 of 2.
+Iteration #1 of 3.
 . 10
 Stopping after 10 input sentences.
 WEIGHTS
-              Glue = -614
-       WordPenalty = +1256.8
-     LanguageModel = +5610.5
- LanguageModel_OOV = -1449
-     PhraseModel_0 = -2107
-     PhraseModel_1 = -4666.1
-     PhraseModel_2 = -2713.5
-     PhraseModel_3 = +4204.3
-     PhraseModel_4 = -1435.8
-     PhraseModel_5 = +916
-     PhraseModel_6 = +190
-       PassThrough = -2527
+              Glue = -1100
+       WordPenalty = -82.082
+     LanguageModel = -3199.1
+ LanguageModel_OOV = -192
+     PhraseModel_0 = +3128.2
+     PhraseModel_1 = -1610.2
+     PhraseModel_2 = -4336.5
+     PhraseModel_3 = +2910.3
+     PhraseModel_4 = +2523.2
+     PhraseModel_5 = +506
+     PhraseModel_6 = +1467
+       PassThrough = -387
         ---
-       1best avg score: 0.17874 (+0.17874)
- 1best avg model score: 88399 (+88399)
-          avg # pairs: 798.2 (meaningless)
-       avg # rank err: 798.2
+       1best avg score: 0.16966 (+0.16966)
+ 1best avg model score: 2.9874e+05 (+2.9874e+05)
+          avg # pairs: 906.3 (meaningless)
+       avg # rank err: 906.3
     avg # margin viol: 0
-   non0 feature count: 887
+   non0 feature count: 825
           avg list sz: 91.3
-          avg f count: 126.85
-(time 0.33 min, 2 s/S)
+          avg f count: 139.77
+(time 0.35 min, 2.1 s/S)

-Iteration #2 of 2.
+Iteration #2 of 3.
 . 10
 WEIGHTS
-              Glue = -1025
-       WordPenalty = +1751.5
-     LanguageModel = +10059
- LanguageModel_OOV = -4490
-     PhraseModel_0 = -2640.7
-     PhraseModel_1 = -3757.4
-     PhraseModel_2 = -1133.1
-     PhraseModel_3 = +1837.3
-     PhraseModel_4 = -3534.3
-     PhraseModel_5 = +2308
-     PhraseModel_6 = +1677
-       PassThrough = -6222
+              Glue = -1221
+       WordPenalty = +836.89
+     LanguageModel = +2332.3
+ LanguageModel_OOV = -1451
+     PhraseModel_0 = +1507.2
+     PhraseModel_1 = -2728.4
+     PhraseModel_2 = -4183.6
+     PhraseModel_3 = +1816.3
+     PhraseModel_4 = -2894.7
+     PhraseModel_5 = +1403
+     PhraseModel_6 = +35
+       PassThrough = -1097
         ---
-       1best avg score: 0.30764 (+0.12891)
- 1best avg model score: -2.5042e+05 (-3.3882e+05)
-          avg # pairs: 725.9 (meaningless)
-       avg # rank err: 725.9
+       1best avg score: 0.17399 (+0.004325)
+ 1best avg model score: 49369 (-2.4937e+05)
+          avg # pairs: 662.4 (meaningless)
+       avg # rank err: 662.4
     avg # margin viol: 0
-   non0 feature count: 1499
+   non0 feature count: 1235
           avg list sz: 91.3
-          avg f count: 114.34
-(time 0.32 min, 1.9 s/S)
+          avg f count: 125.11
+(time 0.27 min, 1.6 s/S)
+
+Iteration #3 of 3.
+ . 10
+WEIGHTS
+              Glue = -1574
+       WordPenalty = -17.372
+     LanguageModel = +6861.8
+ LanguageModel_OOV = -3997
+     PhraseModel_0 = -398.76
+     PhraseModel_1 = -3419.6
+     PhraseModel_2 = -3186.7
+     PhraseModel_3 = +1050.8
+     PhraseModel_4 = -2902.7
+     PhraseModel_5 = -486
+     PhraseModel_6 = -436
+       PassThrough = -2985
+        ---
+       1best avg score: 0.30742 (+0.13343)
+ 1best avg model score: -1.5393e+05 (-2.0329e+05)
+          avg # pairs: 623.8 (meaningless)
+       avg # rank err: 623.8
+    avg # margin viol: 0
+   non0 feature count: 1770
+          avg list sz: 91.3
+          avg f count: 118.58
+(time 0.25 min, 1.5 s/S)

 Writing weights file to '-' ... done
 ---
-Best iteration: 2 [SCORE 'fixed_stupid_bleu'=0.30764].
-This took 0.65 min.
+Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.30742].
+This took 0.86667 min.
diff --git a/training/dtrain/parallelize.rb b/training/dtrain/parallelize.rb
index 2fc66cab..60ca9422 100755
--- a/training/dtrain/parallelize.rb
+++ b/training/dtrain/parallelize.rb
@@ -21,7 +21,8 @@ opts = Trollop::options do
   opt :qsub, "use qsub", :type => :bool, :default => false
   opt :dtrain_binary, "path to dtrain binary", :type => :string
   opt :extra_qsub, "extra qsub args", :type => :string, :default => ""
-  opt :per_shard_decoder_configs, "give special decoder config per shard", :type => :string, :short => :o
+  opt :per_shard_decoder_configs, "give special decoder config per shard", :type => :string, :short => '-o'
+  opt :first_input_weights, "input weights for first iter", :type => :string, :default => '', :short => '-w'
 end

 usage if not opts[:config]&&opts[:shards]&&opts[:input]&&opts[:references]
@@ -54,6 +55,7 @@
 input = opts[:input]
 refs = opts[:references]
 use_qsub = opts[:qsub]
 shards_at_once = opts[:processes_at_once]
+first_input_weights = opts[:first_input_weights]

 `mkdir work`

@@ -137,10 +139,13 @@ end
     else
       cdec_cfg = ""
     end
+    if first_input_weights!='' && epoch == 0
+      input_weights = "--input_weights #{first_input_weights}"
+    end
     pids << Kernel.fork {
-      `#{qsub_str_start}#{dtrain_bin} -c #{ini} #{cdec_cfg}\
+      `#{qsub_str_start}#{dtrain_bin} -c #{ini} #{cdec_cfg} #{input_weights}\
       --input #{input_files[shard]}\
-      --refs #{refs_files[shard]} #{input_weights}\
+      --refs #{refs_files[shard]}\
       --output work/weights.#{shard}.#{epoch}#{qsub_str_end} #{local_end}`
     }
     weights_files << "work/weights.#{shard}.#{epoch}"
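With the parallelize.rb change above, a run can be seeded from existing weights, which the script forwards to dtrain as --input_weights on the first epoch only (note the epoch == 0 guard). A hypothetical invocation, assuming Trollop's default long-option naming (underscores become dashes) and placeholder corpus and weights file names:

  ruby parallelize.rb --config dtrain.ini --shards 4 \
      --input corpus.de --references corpus.en -w initial.weights

The commit's other fix in this file, quoting the short flag as '-o', likewise allows supplying one decoder config per shard via --per-shard-decoder-configs.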