From c171ea9c37bf170b91946e0f5d22e7fd0d2c5825 Mon Sep 17 00:00:00 2001
From: Patrick Simianer 
Date: Tue, 10 Sep 2013 19:54:40 +0200
Subject: do pclr after sentences..
---
 training/dtrain/examples/standard/dtrain.ini | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'training/dtrain/examples')
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 23e94285..07350a0b 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,6 +1,6 @@
 input=./nc-wmt11.de.gz
 refs=./nc-wmt11.en.gz
-output=-                  # a weights file (add .gz for gzip compression) or STDOUT '-'
+output=asdf                  # a weights file (add .gz for gzip compression) or STDOUT '-'
 select_weights=VOID       # output average (over epochs) weight vector
 decoder_config=./cdec.ini # config for cdec
 # weights for these features will be printed on each iteration
@@ -22,3 +22,4 @@ pair_sampling=XYX        #
 hi_lo=0.1                # 10 vs 80 vs 10 and 80 vs 10 here
 pair_threshold=0         # minimum distance in BLEU (here: > 0)
 loss_margin=0            # update if correctly ranked, but within this margin
+pclr=1
-- 
cgit v1.2.3
From d6265f937e60f53f228feda9934314de5d88f2d0 Mon Sep 17 00:00:00 2001
From: Patrick Simianer 
Date: Tue, 10 Sep 2013 20:03:22 +0200
Subject: rm debug stuff
---
 training/dtrain/dtrain.cc                    | 9 ---------
 training/dtrain/examples/standard/dtrain.ini | 1 -
 2 files changed, 10 deletions(-)
(limited to 'training/dtrain/examples')
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 2d090666..5dfd6286 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -537,15 +537,6 @@ main(int argc, char** argv)
     Weights::WriteToFile(w_fn, dense_weights, true);
   }
 
-  WriteFile of("-");
-  ostream& o = *of.stream();
-  o << "<<<<<<<<<<<<<<<<<<<<<<<<\n";
-  for (SparseVector<weight_t>::iterator it = learning_rates.begin(); it != learning_rates.end(); ++it) {
-	  if (it->second == 0) continue;
-      o << FD::Convert(it->first) << '\t' << it->second << endl;
-  }
-  o << ">>>>>>>>>>>>>>>>>>>>>>>>>\n";
-
   } // outer loop
 
   if (average) w_average /= (weight_t)T;
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 07350a0b..c0912a62 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -22,4 +22,3 @@ pair_sampling=XYX        #
 hi_lo=0.1                # 10 vs 80 vs 10 and 80 vs 10 here
 pair_threshold=0         # minimum distance in BLEU (here: > 0)
 loss_margin=0            # update if correctly ranked, but within this margin
-pclr=1
-- 
cgit v1.2.3
From 8fae8c224fc7a8f8a858ed9a022992d020057f65 Mon Sep 17 00:00:00 2001
From: Patrick Simianer 
Date: Tue, 8 Oct 2013 13:57:45 +0200
Subject: dtrain: added pclr variants and new expected-output; fixed bug in
 soft syntax features
---
 decoder/ff_soft_syntax.cc                         |   2 +-
 training/dtrain/dtrain.cc                         |  31 +++---
 training/dtrain/examples/standard/dtrain.ini      |   6 +-
 training/dtrain/examples/standard/expected-output | 115 +++++++++++++---------
 training/dtrain/parallelize.rb                    |  11 ++-
 5 files changed, 101 insertions(+), 64 deletions(-)
(limited to 'training/dtrain/examples')
diff --git a/decoder/ff_soft_syntax.cc b/decoder/ff_soft_syntax.cc
index 9981fa45..d84f2e6d 100644
--- a/decoder/ff_soft_syntax.cc
+++ b/decoder/ff_soft_syntax.cc
@@ -21,7 +21,7 @@ using namespace std;
 struct SoftSyntacticFeaturesImpl {
   SoftSyntacticFeaturesImpl(const string& param) {
     vector<string> labels = SplitOnWhitespace(param);
-	for (unsigned int i = 0; i < labels.size(); i++) 
+	//for (unsigned int i = 0; i < labels.size(); i++) 
       //cerr << "Labels: " << labels.at(i) << endl;
     for (unsigned int i = 0; i < labels.size(); i++) {
       string label = labels.at(i);
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 9d60a903..38a9b69a 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -40,7 +40,7 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
     ("scale_bleu_diff",   po::value()->zero_tokens(),                      "learning rate <- bleu diff of a misranked pair")
     ("loss_margin",       po::value()->default_value(0.),  "update if no error in pref pair but model scores this near")
     ("max_pairs",         po::value()->default_value(std::numeric_limits::max()), "max. # of pairs per Sent.")
-    ("pclr",              po::value()->zero_tokens(),                         "use a (simple) per-coordinate learning rate")
+    ("pclr",              po::value()->default_value("no"),         "use a (simple|adagrad) per-coordinate learning rate")
     ("noup",              po::value()->zero_tokens(),                                               "do not update weights");
   po::options_description cl("Command Line Options");
   cl.add_options()
@@ -125,8 +125,7 @@ main(int argc, char** argv)
   if (loss_margin > 9998.) loss_margin = std::numeric_limits<weight_t>::max();
   bool scale_bleu_diff = false;
   if (cfg.count("scale_bleu_diff")) scale_bleu_diff = true;
-  bool pclr = false;
-  if (cfg.count("pclr")) pclr = true;
+  const string pclr = cfg["pclr"].as<string>();
   bool average = false;
   if (select_weights == "avg")
     average = true;
@@ -190,7 +189,6 @@ main(int argc, char** argv)
   weight_t gamma = cfg["gamma"].as();
 
   // faster perceptron: consider only misranked pairs, see
-  // DO NOT ENABLE  WITH SVM (gamma > 0) OR loss_margin!
   bool faster_perceptron = false;
   if (gamma==0 && loss_margin==0) faster_perceptron = true;
 
@@ -251,8 +249,7 @@ main(int argc, char** argv)
       cerr << setw(25) << "l1 reg " << l1_reg << " '" << cfg["l1_reg"].as() << "'" << endl;
     if (rescale)
       cerr << setw(25) << "rescale " << rescale << endl;
-    if (pclr)
-      cerr << setw(25) << "pclr " << pclr << endl;
+    cerr << setw(25) << "pclr " << pclr << endl;
     cerr << setw(25) << "max pairs " << max_pairs << endl;
     cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as() << "'" << endl;
     cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
@@ -392,22 +389,30 @@ main(int argc, char** argv)
         if (scale_bleu_diff) eta = it->first.score - it->second.score;
         if (rank_error || margin < loss_margin) {
           SparseVector<weight_t> diff_vec = it->first.f - it->second.f;
-          if (pclr) {
+          if (pclr != "no") {
             sum_up += diff_vec;
           } else {
             lambdas.plus_eq_v_times_s(diff_vec, eta);
+            if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs)); // FIXME
           }
-          if (gamma)
-            lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs));
         }
       }
 
       // per-coordinate learning rate
-      if (pclr) {
+      if (pclr != "no") {
         SparseVector<weight_t>::iterator it = sum_up.begin();
-        for (; it != lambdas.end(); ++it) {
-          learning_rates[it->first]++;
-          lambdas[it->first] += it->second / learning_rates[it->first]; //* max(0.00000001, eta/(eta+learning_rates[it->first]));
+        for (; it != sum_up.end(); ++it) {
+          if (pclr == "simple") {
+           lambdas[it->first] += it->second / max(1.0, learning_rates[it->first]);
+           learning_rates[it->first]++;
+          } else if (pclr == "adagrad") {
+            if (learning_rates[it->first] == 0) {
+             lambdas[it->first] +=  it->second * eta;
+            } else {
+             lambdas[it->first] +=  it->second * eta * learning_rates[it->first];
+            }
+            learning_rates[it->first] += pow(it->second, 2.0);
+          }
         }
       }
 
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index c0912a62..e6d6382e 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,6 +1,6 @@
 input=./nc-wmt11.de.gz
 refs=./nc-wmt11.en.gz
-output=asdf                  # a weights file (add .gz for gzip compression) or STDOUT '-'
+output=-                  # a weights file (add .gz for gzip compression) or STDOUT '-'
 select_weights=VOID       # output average (over epochs) weight vector
 decoder_config=./cdec.ini # config for cdec
 # weights for these features will be printed on each iteration
@@ -10,11 +10,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
 stop_after=10 # stop epoch after 10 inputs
 
 # interesting stuff
-epochs=2                 # run over input 2 times
+epochs=3                 # run over input 3 times
 k=100                    # use 100best lists
 N=4                      # optimize (approx) BLEU4
 scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=1.0        # learning rate, don't care if gamma=0 (perceptron)
+learning_rate=1.0        # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
 gamma=0                  # use SVM reg
 sample_from=kbest        # use kbest lists (as opposed to forest)
 filter=uniq              # only unique entries in kbest (surface form)
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index 21f91244..a35bbe6f 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,13 +4,13 @@ Reading ./nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
   Example feature: Shape_S00000_T00000
-Seeding random number sequence to 970626287
+Seeding random number sequence to 4049211323
 
 dtrain
 Parameters:
                        k 100
                        N 4
-                       T 2
+                       T 3
                   scorer 'fixed_stupid_bleu'
              sample from 'kbest'
                   filter 'uniq'
@@ -23,6 +23,7 @@ Parameters:
           pair threshold 0
           select weights 'VOID'
                   l1 reg 0 'none'
+                    pclr no
                max pairs 4294967295
                 cdec cfg './cdec.ini'
                    input './nc-wmt11.de.gz'
@@ -30,62 +31,88 @@ Parameters:
                   output '-'
               stop_after 10
 (a dot represents 10 inputs)
-Iteration #1 of 2.
+Iteration #1 of 3.
  . 10
 Stopping after 10 input sentences.
 WEIGHTS
-              Glue = -614
-       WordPenalty = +1256.8
-     LanguageModel = +5610.5
- LanguageModel_OOV = -1449
-     PhraseModel_0 = -2107
-     PhraseModel_1 = -4666.1
-     PhraseModel_2 = -2713.5
-     PhraseModel_3 = +4204.3
-     PhraseModel_4 = -1435.8
-     PhraseModel_5 = +916
-     PhraseModel_6 = +190
-       PassThrough = -2527
+              Glue = -1100
+       WordPenalty = -82.082
+     LanguageModel = -3199.1
+ LanguageModel_OOV = -192
+     PhraseModel_0 = +3128.2
+     PhraseModel_1 = -1610.2
+     PhraseModel_2 = -4336.5
+     PhraseModel_3 = +2910.3
+     PhraseModel_4 = +2523.2
+     PhraseModel_5 = +506
+     PhraseModel_6 = +1467
+       PassThrough = -387
         ---
-       1best avg score: 0.17874 (+0.17874)
- 1best avg model score: 88399 (+88399)
-           avg # pairs: 798.2 (meaningless)
-        avg # rank err: 798.2
+       1best avg score: 0.16966 (+0.16966)
+ 1best avg model score: 2.9874e+05 (+2.9874e+05)
+           avg # pairs: 906.3 (meaningless)
+        avg # rank err: 906.3
      avg # margin viol: 0
-    non0 feature count: 887
+    non0 feature count: 825
            avg list sz: 91.3
-           avg f count: 126.85
-(time 0.33 min, 2 s/S)
+           avg f count: 139.77
+(time 0.35 min, 2.1 s/S)
 
-Iteration #2 of 2.
+Iteration #2 of 3.
  . 10
 WEIGHTS
-              Glue = -1025
-       WordPenalty = +1751.5
-     LanguageModel = +10059
- LanguageModel_OOV = -4490
-     PhraseModel_0 = -2640.7
-     PhraseModel_1 = -3757.4
-     PhraseModel_2 = -1133.1
-     PhraseModel_3 = +1837.3
-     PhraseModel_4 = -3534.3
-     PhraseModel_5 = +2308
-     PhraseModel_6 = +1677
-       PassThrough = -6222
+              Glue = -1221
+       WordPenalty = +836.89
+     LanguageModel = +2332.3
+ LanguageModel_OOV = -1451
+     PhraseModel_0 = +1507.2
+     PhraseModel_1 = -2728.4
+     PhraseModel_2 = -4183.6
+     PhraseModel_3 = +1816.3
+     PhraseModel_4 = -2894.7
+     PhraseModel_5 = +1403
+     PhraseModel_6 = +35
+       PassThrough = -1097
         ---
-       1best avg score: 0.30764 (+0.12891)
- 1best avg model score: -2.5042e+05 (-3.3882e+05)
-           avg # pairs: 725.9 (meaningless)
-        avg # rank err: 725.9
+       1best avg score: 0.17399 (+0.004325)
+ 1best avg model score: 49369 (-2.4937e+05)
+           avg # pairs: 662.4 (meaningless)
+        avg # rank err: 662.4
      avg # margin viol: 0
-    non0 feature count: 1499
+    non0 feature count: 1235
            avg list sz: 91.3
-           avg f count: 114.34
-(time 0.32 min, 1.9 s/S)
+           avg f count: 125.11
+(time 0.27 min, 1.6 s/S)
+
+Iteration #3 of 3.
+ . 10
+WEIGHTS
+              Glue = -1574
+       WordPenalty = -17.372
+     LanguageModel = +6861.8
+ LanguageModel_OOV = -3997
+     PhraseModel_0 = -398.76
+     PhraseModel_1 = -3419.6
+     PhraseModel_2 = -3186.7
+     PhraseModel_3 = +1050.8
+     PhraseModel_4 = -2902.7
+     PhraseModel_5 = -486
+     PhraseModel_6 = -436
+       PassThrough = -2985
+        ---
+       1best avg score: 0.30742 (+0.13343)
+ 1best avg model score: -1.5393e+05 (-2.0329e+05)
+           avg # pairs: 623.8 (meaningless)
+        avg # rank err: 623.8
+     avg # margin viol: 0
+    non0 feature count: 1770
+           avg list sz: 91.3
+           avg f count: 118.58
+(time 0.25 min, 1.5 s/S)
 
 Writing weights file to '-' ...
 done
 
 ---
-Best iteration: 2 [SCORE 'fixed_stupid_bleu'=0.30764].
-This took 0.65 min.
+Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.30742].
+This took 0.86667 min.
diff --git a/training/dtrain/parallelize.rb b/training/dtrain/parallelize.rb
index 2fc66cab..60ca9422 100755
--- a/training/dtrain/parallelize.rb
+++ b/training/dtrain/parallelize.rb
@@ -21,7 +21,8 @@ opts = Trollop::options do
   opt :qsub, "use qsub", :type => :bool, :default => false
   opt :dtrain_binary, "path to dtrain binary", :type => :string
   opt :extra_qsub, "extra qsub args", :type => :string, :default => ""
-  opt :per_shard_decoder_configs, "give special decoder config per shard", :type => :string, :short => :o
+  opt :per_shard_decoder_configs, "give special decoder config per shard", :type => :string, :short => '-o'
+  opt :first_input_weights, "input weights for first iter", :type => :string, :default => '', :short => '-w'
 end
 usage if not opts[:config]&&opts[:shards]&&opts[:input]&&opts[:references]
 
@@ -54,6 +55,7 @@ input = opts[:input]
 refs  = opts[:references]
 use_qsub       = opts[:qsub]
 shards_at_once = opts[:processes_at_once]
+first_input_weights  = opts[:first_input_weights]
 
 `mkdir work`
 
@@ -137,10 +139,13 @@ end
       else
         cdec_cfg = ""
       end
+      if first_input_weights!='' && epoch == 0
+        input_weights = "--input_weights #{first_input_weights}"
+      end
       pids << Kernel.fork {
-        `#{qsub_str_start}#{dtrain_bin} -c #{ini} #{cdec_cfg}\
+        `#{qsub_str_start}#{dtrain_bin} -c #{ini} #{cdec_cfg} #{input_weights}\
           --input #{input_files[shard]}\
-          --refs #{refs_files[shard]} #{input_weights}\
+          --refs #{refs_files[shard]}\
           --output work/weights.#{shard}.#{epoch}#{qsub_str_end} #{local_end}`
       }
       weights_files << "work/weights.#{shard}.#{epoch}"
-- 
cgit v1.2.3
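
The pclr hunk above introduces two per-coordinate update rules. As a minimal standalone sketch, assuming std::map in place of cdec's SparseVector<weight_t> (all names here are illustrative, not from the dtrain source):

    #include <algorithm>
    #include <cmath>
    #include <map>
    #include <string>

    typedef std::map<int, double> Vec; // feature id -> value (stand-in for SparseVector<weight_t>)

    // Hypothetical helper mirroring the per-coordinate update block above;
    // sum_up holds the summed feature differences of the misranked pairs.
    void pclr_update(Vec& lambdas, Vec& learning_rates, const Vec& sum_up,
                     const std::string& pclr, double eta) {
      for (Vec::const_iterator it = sum_up.begin(); it != sum_up.end(); ++it) {
        if (pclr == "simple") {
          // shrink the step for coordinates that have been updated often
          lambdas[it->first] += it->second / std::max(1.0, learning_rates[it->first]);
          learning_rates[it->first]++;
        } else if (pclr == "adagrad") {
          // first touch: plain step; afterwards scale by the accumulator
          if (learning_rates[it->first] == 0)
            lambdas[it->first] += it->second * eta;
          else
            lambdas[it->first] += it->second * eta * learning_rates[it->first];
          learning_rates[it->first] += std::pow(it->second, 2.0);
        }
      }
    }

Note that textbook AdaGrad would divide the step by the square root of the accumulated squared gradients; the committed "adagrad" branch instead multiplies by the raw accumulator, and the sketch mirrors the committed behavior.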
From 035585ee59e593d2b0cc358068d6a5dd639037cc Mon Sep 17 00:00:00 2001
From: Patrick Simianer 
Date: Sun, 3 Nov 2013 21:24:51 +0100
Subject: bitext input for dtrain
---
 training/dtrain/Makefile.am                   |   2 +-
 training/dtrain/dtrain.cc                     |  45 ++++++++++++++++++++------
 training/dtrain/dtrain.h                      |   2 ++
 training/dtrain/examples/standard/dtrain.ini  |   5 +--
 training/dtrain/examples/standard/nc-wmt11.gz | Bin 0 -> 113504 bytes
 5 files changed, 41 insertions(+), 13 deletions(-)
 create mode 100644 training/dtrain/examples/standard/nc-wmt11.gz
(limited to 'training/dtrain/examples')
diff --git a/training/dtrain/Makefile.am b/training/dtrain/Makefile.am
index 844c790d..ecb6c128 100644
--- a/training/dtrain/Makefile.am
+++ b/training/dtrain/Makefile.am
@@ -1,7 +1,7 @@
 bin_PROGRAMS = dtrain
 
 dtrain_SOURCES = dtrain.cc score.cc dtrain.h kbestget.h ksampler.h pairsampling.h score.h
-dtrain_LDADD   = ../../decoder/libcdec.a ../../klm/search/libksearch.a ../../mteval/libmteval.a ../../utils/libutils.a ../../klm/lm/libklm.a ../../klm/util/libklm_util.a ../../klm/util/double-conversion/libklm_util_double.a
+dtrain_LDADD   = ../../decoder/libcdec.a ../../klm/search/libksearch.a ../../mteval/libmteval.a ../../utils/libutils.a ../../klm/lm/libklm.a ../../klm/util/libklm_util.a ../../klm/util/double-conversion/libklm_util_double.a -lboost_regex
 
 AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval
 
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 38a9b69a..a496f08a 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -12,8 +12,9 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
 {
   po::options_description ini("Configuration File Options");
   ini.add_options()
-    ("input",             po::value()->default_value("-"),                                             "input file (src)")
+    ("input",             po::value(),                                                                 "input file (src)")
     ("refs,r",            po::value(),                                                                       "references")
+    ("bitext,b",          po::value(),                                                            "bitext: 'src ||| tgt'")
     ("output",            po::value()->default_value("-"),                          "output weights file, '-' for STDOUT")
     ("input_weights",     po::value(),                                "input weights file (e.g. from previous iteration)")
     ("decoder_config",    po::value(),                                                      "configuration file for cdec")
@@ -73,13 +74,17 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
     cerr << "Wrong 'pair_sampling' param: '" << (*cfg)["pair_sampling"].as() << "'." << endl;
     return false;
   }
-  if(cfg->count("hi_lo") && (*cfg)["pair_sampling"].as() != "XYX") {
+  if (cfg->count("hi_lo") && (*cfg)["pair_sampling"].as() != "XYX") {
     cerr << "Warning: hi_lo only works with pair_sampling XYX." << endl;
   }
-  if((*cfg)["hi_lo"].as() > 0.5 || (*cfg)["hi_lo"].as() < 0.01) {
+  if ((*cfg)["hi_lo"].as() > 0.5 || (*cfg)["hi_lo"].as() < 0.01) {
     cerr << "hi_lo must lie in [0.01, 0.5]" << endl;
     return false;
   }
+  if ((cfg->count("input")>0 || cfg->count("refs")>0) && cfg->count("bitext")>0) {
+    cerr << "Provide 'input' and 'refs' or 'bitext', not both." << endl;
+    return false;
+  }
   if ((*cfg)["pair_threshold"].as() < 0) {
     cerr << "The threshold must be >= 0!" << endl;
     return false;
@@ -208,13 +213,24 @@ main(int argc, char** argv)
   // output
   string output_fn = cfg["output"].as();
   // input
-  string input_fn = cfg["input"].as();
+  bool read_bitext = false;
+  string input_fn;
+  if (cfg.count("bitext")) {
+    read_bitext = true;
+    input_fn = cfg["bitext"].as();
+  } else {
+    input_fn = cfg["input"].as();
+  }
   ReadFile input(input_fn);
   // buffer input for t > 0
   vector src_str_buf;          // source strings (decoder takes only strings)
   vector > ref_ids_buf; // references as WordID vecs
-  string refs_fn = cfg["refs"].as();
-  ReadFile refs(refs_fn);
+  ReadFile refs;
+  string refs_fn;
+  if (!read_bitext) {
+    refs_fn = cfg["refs"].as();
+    refs.Init(refs_fn);
+  }
 
   unsigned in_sz = std::numeric_limits<unsigned>::max(); // input index, input size
   vector<vector<score_t> > all_scores;
@@ -253,7 +269,8 @@ main(int argc, char** argv)
     cerr << setw(25) << "max pairs " << max_pairs << endl;
     cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as() << "'" << endl;
     cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
-    cerr << setw(25) << "refs " << "'" << refs_fn << "'" << endl;
+    if (!read_bitext)
+      cerr << setw(25) << "refs " << "'" << refs_fn << "'" << endl;
     cerr << setw(25) << "output " << "'" << output_fn << "'" << endl;
     if (cfg.count("input_weights"))
       cerr << setw(25) << "weights in " << "'" << cfg["input_weights"].as() << "'" << endl;
@@ -279,9 +296,16 @@ main(int argc, char** argv)
   {
 
     string in;
+    string ref;
     bool next = false, stop = false; // next iteration or premature stop
     if (t == 0) {
       if(!getline(*input, in)) next = true;
+      if(read_bitext) {
+        vector<string> strs;
+        boost::algorithm::split_regex(strs, in, boost::regex(" \\|\\|\\| "));
+        in = strs[0];
+        ref = strs[1];
+      }
     } else {
       if (ii == in_sz) next = true; // stop if we reach the end of our input
     }
@@ -318,10 +342,11 @@ main(int argc, char** argv)
     // getting input
     vector<WordID> ref_ids; // reference as vector<WordID>
     if (t == 0) {
-      string r_;
-      getline(*refs, r_);
+      if (!read_bitext) {
+        getline(*refs, ref);
+      }
       vector<string> ref_tok;
-      boost::split(ref_tok, r_, boost::is_any_of(" "));
+      boost::split(ref_tok, ref, boost::is_any_of(" "));
       register_and_convert(ref_tok, ref_ids);
       ref_ids_buf.push_back(ref_ids);
       src_str_buf.push_back(in);
diff --git a/training/dtrain/dtrain.h b/training/dtrain/dtrain.h
index 3981fb39..ccb5ad4d 100644
--- a/training/dtrain/dtrain.h
+++ b/training/dtrain/dtrain.h
@@ -9,6 +9,8 @@
 #include 
 
 #include <boost/algorithm/string.hpp>
+#include <boost/algorithm/string/regex.hpp>
+#include <boost/regex.hpp>
 #include <boost/program_options.hpp>
 
 #include "decoder.h"
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index e6d6382e..7dbb4ff0 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,5 +1,6 @@
-input=./nc-wmt11.de.gz
-refs=./nc-wmt11.en.gz
+#input=./nc-wmt11.de.gz
+#refs=./nc-wmt11.en.gz
+bitext=./nc-wmt11.gz
 output=-                  # a weights file (add .gz for gzip compression) or STDOUT '-'
 select_weights=VOID       # output average (over epochs) weight vector
 decoder_config=./cdec.ini # config for cdec
diff --git a/training/dtrain/examples/standard/nc-wmt11.gz b/training/dtrain/examples/standard/nc-wmt11.gz
new file mode 100644
index 00000000..c39c5aef
Binary files /dev/null and b/training/dtrain/examples/standard/nc-wmt11.gz differ
-- 
cgit v1.2.3
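
The bitext reader above splits each line on " ||| " with boost::algorithm::split_regex. A minimal sketch of the same split using only std::string, assuming exactly one separator per line (the function name is illustrative, not from the source):

    #include <iostream>
    #include <string>

    // Split one bitext line "src ||| tgt" into source and target halves.
    bool split_bitext(const std::string& line, std::string& src, std::string& tgt) {
      const std::string sep = " ||| ";
      const size_t pos = line.find(sep);
      if (pos == std::string::npos) return false; // malformed line
      src = line.substr(0, pos);
      tgt = line.substr(pos + sep.size());
      return true;
    }

    int main() {
      std::string src, tgt;
      if (split_bitext("das ist ein test ||| this is a test", src, tgt))
        std::cout << src << "\n" << tgt << "\n";
      return 0;
    }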
From a6d8ae2bd3cc2294e17588656e6aa20a96f6fcbc Mon Sep 17 00:00:00 2001
From: Patrick Simianer 
Date: Tue, 12 Nov 2013 18:36:03 +0100
Subject: implemented batch tuning
---
 training/dtrain/dtrain.cc                    | 81 ++++++++++++++++++++++------
 training/dtrain/examples/standard/dtrain.ini |  4 +-
 2 files changed, 67 insertions(+), 18 deletions(-)
(limited to 'training/dtrain/examples')
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index a496f08a..23131810 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -42,6 +42,9 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
     ("loss_margin",       po::value()->default_value(0.),  "update if no error in pref pair but model scores this near")
     ("max_pairs",         po::value()->default_value(std::numeric_limits::max()), "max. # of pairs per Sent.")
     ("pclr",              po::value()->default_value("no"),         "use a (simple|adagrad) per-coordinate learning rate")
+    ("batch",             po::value()->zero_tokens(),                                               "do batch optimization")
+    //("repeat",            po::value()->default_value(1),          "repeat optimization over kbest list this number of times")
+    //("test-k-best",       po::value()->zero_tokens(),                       "check if optimization works (use repeat >= 2)")
     ("noup",              po::value()->zero_tokens(),                                               "do not update weights");
   po::options_description cl("Command Line Options");
   cl.add_options()
@@ -126,7 +129,12 @@ main(int argc, char** argv)
   const float hi_lo = cfg["hi_lo"].as();
   const score_t approx_bleu_d = cfg["approx_bleu_d"].as();
   const unsigned max_pairs = cfg["max_pairs"].as();
+  //int repeat = cfg["repeat"].as();
+  //bool test_k_best = false;
+  //if (cfg.count("test-k-best")) test_k_best = true;
   weight_t loss_margin = cfg["loss_margin"].as();
+  bool batch = false;
+  if (cfg.count("batch")) batch = true;
   if (loss_margin > 9998.) loss_margin = std::numeric_limits::max();
   bool scale_bleu_diff = false;
   if (cfg.count("scale_bleu_diff")) scale_bleu_diff = true;
@@ -184,10 +192,10 @@ main(int argc, char** argv)
   observer->SetScorer(scorer);
 
   // init weights
-  vector<weight_t>& dense_weights = decoder.CurrentWeightVector();
+  vector<weight_t>& decoder_weights = decoder.CurrentWeightVector();
   SparseVector<weight_t> lambdas, cumulative_penalties, w_average;
-  if (cfg.count("input_weights")) Weights::InitFromFile(cfg["input_weights"].as<string>(), &dense_weights);
-  Weights::InitSparseVector(dense_weights, &lambdas);
+  if (cfg.count("input_weights")) Weights::InitFromFile(cfg["input_weights"].as<string>(), &decoder_weights);
+  Weights::InitSparseVector(decoder_weights, &lambdas);
 
   // meta params for perceptron, SVM
   weight_t eta = cfg["learning_rate"].as<weight_t>();
@@ -245,6 +253,7 @@ main(int argc, char** argv)
     cerr << setw(25) << "k " << k << endl;
     cerr << setw(25) << "N " << N << endl;
     cerr << setw(25) << "T " << T << endl;
+    cerr << setw(25) << "batch " << batch << endl;
     cerr << setw(26) << "scorer '" << scorer_str << "'" << endl;
     if (scorer_str == "approx_bleu")
       cerr << setw(25) << "approx. B discount " << approx_bleu_d << endl;
@@ -267,6 +276,8 @@ main(int argc, char** argv)
       cerr << setw(25) << "rescale " << rescale << endl;
     cerr << setw(25) << "pclr " << pclr << endl;
     cerr << setw(25) << "max pairs " << max_pairs << endl;
+    //cerr << setw(25) << "repeat " << repeat << endl;
+    //cerr << setw(25) << "test k-best " << test_k_best << endl;
     cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as() << "'" << endl;
     cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
     if (!read_bitext)
@@ -281,17 +292,25 @@ main(int argc, char** argv)
 
   // pclr
   SparseVector<weight_t> learning_rates;
+  // batch
+  SparseVector<weight_t> batch_updates;
+  weight_t batch_loss;
+
+  //int did_improve; // FIXME for test-k-best
 
   for (unsigned t = 0; t < T; t++) // T epochs
   {
-
+  
   time_t start, end;
   time(&start);
   score_t score_sum = 0.;
   score_t model_sum(0);
   unsigned ii = 0, rank_errors = 0, margin_violations = 0, npairs = 0, f_count = 0, list_sz = 0;
+  batch_loss = 0.;
   if (!quiet) cerr << "Iteration #" << t+1 << " of " << T << "." << endl;
 
+  //did_improve = 0;
+
   while(true)
   {
 
@@ -337,7 +356,7 @@ main(int argc, char** argv)
     if (next || stop) break;
 
     // weights
-    lambdas.init_vector(&dense_weights);
+    lambdas.init_vector(&decoder_weights);
 
     // getting input
     vector<WordID> ref_ids; // reference as vector<WordID>
@@ -392,33 +411,51 @@ main(int argc, char** argv)
         partXYX(samples, pairs, pair_threshold, max_pairs, faster_perceptron, hi_lo);
       if (pair_sampling == "PRO")
         PROsampling(samples, pairs, pair_threshold, max_pairs);
-      npairs += pairs.size();
+      int cur_npairs = pairs.size();
+      npairs += cur_npairs;
+
+      weight_t kbest_loss_first, kbest_loss_last = 0.0;
 
+//for (int q=0; q < repeat; q++) { // repeat
+
+      weight_t kbest_loss = 0.0; // test-k-best
       SparseVector<weight_t> lambdas_copy; // for l1 regularization
       SparseVector<weight_t> sum_up; // for pclr
       if (l1naive||l1clip||l1cumul) lambdas_copy = lambdas;
 
       for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
            it != pairs.end(); it++) {
-        bool rank_error;
+
+        /*if (repeat > 1) {
+          double x = max(0.0, -1.0 * (lambdas.dot(it->first.f) - lambdas.dot(it->second.f))); 
+          kbest_loss += x;
+        }*/
+
+        score_t model_diff = it->first.model - it->second.model;
+        bool rank_error = false;
         score_t margin;
         if (faster_perceptron) { // we only consider misranked pairs
           rank_error = true; // pair sampling already did this for us
           margin = std::numeric_limits::max();
         } else {
-          rank_error = it->first.model <= it->second.model;
-          margin = fabs(it->first.model - it->second.model);
+          rank_error = model_diff<=0.0;
+          margin = fabs(model_diff);
           if (!rank_error && margin < loss_margin) margin_violations++;
         }
         if (rank_error) rank_errors++;
         if (scale_bleu_diff) eta = it->first.score - it->second.score;
         if (rank_error || margin < loss_margin) {
           SparseVector<weight_t> diff_vec = it->first.f - it->second.f;
+          if (batch) {
+            batch_loss += max(0., -1.0*model_diff);
+            batch_updates += diff_vec;
+            continue;
+          }
           if (pclr != "no") {
             sum_up += diff_vec;
           } else {
             lambdas.plus_eq_v_times_s(diff_vec, eta);
-            if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs)); // FIXME
+            if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./cur_npairs));
           }
         }
       }
@@ -487,6 +524,11 @@ main(int argc, char** argv)
         }
       }
 
+      //if (q==0)  {  kbest_loss_first = kbest_loss; }
+      //if (q==repeat-1) {  kbest_loss_last = kbest_loss; }
+//}//repeat
+//if((kbest_loss_first - kbest_loss_last) > 0) did_improve++;
+
     }
 
     if (rescale) lambdas /= lambdas.l2norm();
@@ -495,14 +537,20 @@ main(int argc, char** argv)
 
   } // input loop
 
-  if (average) w_average += lambdas;
+  if (t == 0) in_sz = ii; // remember size of input (# lines)
 
-  if (scorer_str == "approx_bleu" || scorer_str == "lc_bleu") scorer->Reset();
+  //if (repeat > 1) cout << "did improve? " << did_improve << " out of " << in_sz << endl; 
 
-  if (t == 0) {
-    in_sz = ii; // remember size of input (# lines)
+  if (batch) {
+    lambdas.plus_eq_v_times_s(batch_updates, eta);
+    if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs));
+    batch_updates.clear();
   }
 
+  if (average) w_average += lambdas;
+
+  if (scorer_str == "approx_bleu" || scorer_str == "lc_bleu") scorer->Reset();
+
   // print some stats
   score_t score_avg = score_sum/(score_t)in_sz;
   score_t model_avg = model_sum/(score_t)in_sz;
@@ -534,6 +582,7 @@ main(int argc, char** argv)
     cerr << endl;
     cerr << "        avg # rank err: ";
     cerr << rank_errors/(float)in_sz << endl;
+    if (batch) cerr << "            batch loss: " << batch_loss << endl;
     cerr << "     avg # margin viol: ";
     cerr << margin_violations/(float)in_sz << endl;
     cerr << "    non0 feature count: " <<  nonz << endl;
@@ -562,9 +611,9 @@ main(int argc, char** argv)
 
   // write weights to file
   if (select_weights == "best" || keep) {
-    lambdas.init_vector(&dense_weights);
+    lambdas.init_vector(&decoder_weights);
     string w_fn = "weights." + boost::lexical_cast(t) + ".gz";
-    Weights::WriteToFile(w_fn, dense_weights, true);
+    Weights::WriteToFile(w_fn, decoder_weights, true);
   }
 
   } // outer loop
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 7dbb4ff0..4d096dfb 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -11,11 +11,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
 stop_after=10 # stop epoch after 10 inputs
 
 # interesting stuff
-epochs=3                 # run over input 3 times
+epochs=100                 # run over input 3 times
 k=100                    # use 100best lists
 N=4                      # optimize (approx) BLEU4
 scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=1.0        # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
+learning_rate=0.0001       # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
 gamma=0                  # use SVM reg
 sample_from=kbest        # use kbest lists (as opposed to forest)
 filter=uniq              # only unique entries in kbest (surface form)
-- 
cgit v1.2.3
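
Batch mode, as added above, defers all pair updates: feature differences are summed into batch_updates over the whole epoch and applied once with step size eta after the input loop, followed by the optional SVM-style shrinkage. A sketch of that end-of-epoch step, again with std::map standing in for SparseVector<weight_t> and illustrative names:

    #include <map>

    typedef std::map<int, double> Vec; // feature id -> value

    // w += v * s, the counterpart of SparseVector's plus_eq_v_times_s;
    // safe even when w and v alias, since only existing entries change.
    void plus_eq_v_times_s(Vec& w, const Vec& v, double s) {
      for (Vec::const_iterator it = v.begin(); it != v.end(); ++it)
        w[it->first] += it->second * s;
    }

    // Applied once per epoch instead of once per pair.
    void batch_epoch_update(Vec& lambdas, Vec& batch_updates, double eta,
                            double gamma, unsigned npairs) {
      plus_eq_v_times_s(lambdas, batch_updates, eta);
      if (gamma > 0.) // SVM-style regularization, as in the committed code
        plus_eq_v_times_s(lambdas, lambdas, -2 * gamma * eta * (1. / npairs));
      batch_updates.clear();
    }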
From 29473017d0f0cdd6f383d253235e2f3388533d13 Mon Sep 17 00:00:00 2001
From: Patrick Simianer 
Date: Tue, 12 Nov 2013 20:07:47 +0100
Subject: impl repeat param
---
 training/dtrain/dtrain.cc                    | 78 ++++++++++++++++------------
 training/dtrain/examples/standard/dtrain.ini |  6 ++-
 2 files changed, 49 insertions(+), 35 deletions(-)
(limited to 'training/dtrain/examples')
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 23131810..441e2cd7 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -43,7 +43,7 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
     ("max_pairs",         po::value()->default_value(std::numeric_limits::max()), "max. # of pairs per Sent.")
     ("pclr",              po::value()->default_value("no"),         "use a (simple|adagrad) per-coordinate learning rate")
     ("batch",             po::value()->zero_tokens(),                                               "do batch optimization")
-    //("repeat",            po::value()->default_value(1),          "repeat optimization over kbest list this number of times")
+    ("repeat",            po::value()->default_value(1),          "repeat optimization over kbest list this number of times")
     //("test-k-best",       po::value()->zero_tokens(),                       "check if optimization works (use repeat >= 2)")
     ("noup",              po::value()->zero_tokens(),                                               "do not update weights");
   po::options_description cl("Command Line Options");
@@ -129,7 +129,7 @@ main(int argc, char** argv)
   const float hi_lo = cfg["hi_lo"].as();
   const score_t approx_bleu_d = cfg["approx_bleu_d"].as();
   const unsigned max_pairs = cfg["max_pairs"].as();
-  //int repeat = cfg["repeat"].as();
+  int repeat = cfg["repeat"].as();
   //bool test_k_best = false;
   //if (cfg.count("test-k-best")) test_k_best = true;
   weight_t loss_margin = cfg["loss_margin"].as();
@@ -276,7 +276,7 @@ main(int argc, char** argv)
       cerr << setw(25) << "rescale " << rescale << endl;
     cerr << setw(25) << "pclr " << pclr << endl;
     cerr << setw(25) << "max pairs " << max_pairs << endl;
-    //cerr << setw(25) << "repeat " << repeat << endl;
+    cerr << setw(25) << "repeat " << repeat << endl;
     //cerr << setw(25) << "test k-best " << test_k_best << endl;
     cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as() << "'" << endl;
     cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
@@ -294,23 +294,19 @@ main(int argc, char** argv)
   SparseVector<weight_t> learning_rates;
   // batch
   SparseVector<weight_t> batch_updates;
-  weight_t batch_loss;
-
-  //int did_improve; // FIXME for test-k-best
+  score_t batch_loss;
 
   for (unsigned t = 0; t < T; t++) // T epochs
   {
-  
+
   time_t start, end;
   time(&start);
   score_t score_sum = 0.;
   score_t model_sum(0);
-  unsigned ii = 0, rank_errors = 0, margin_violations = 0, npairs = 0, f_count = 0, list_sz = 0;
+  unsigned ii = 0, rank_errors = 0, margin_violations = 0, npairs = 0, f_count = 0, list_sz = 0, kbest_loss_improve = 0;
   batch_loss = 0.;
   if (!quiet) cerr << "Iteration #" << t+1 << " of " << T << "." << endl;
 
-  //did_improve = 0;
-
   while(true)
   {
 
@@ -395,8 +391,10 @@ main(int argc, char** argv)
       }
     }
 
-    score_sum += (*samples)[0].score; // stats for 1best
-    model_sum += (*samples)[0].model;
+    if (repeat == 1) {
+      score_sum += (*samples)[0].score; // stats for 1best
+      model_sum += (*samples)[0].model;
+    }
 
     f_count += observer->get_f_count();
     list_sz += observer->get_sz();
@@ -414,24 +412,22 @@ main(int argc, char** argv)
       int cur_npairs = pairs.size();
       npairs += cur_npairs;
 
-      weight_t kbest_loss_first, kbest_loss_last = 0.0;
+      score_t kbest_loss_first, kbest_loss_last = 0.0;
 
-//for (int q=0; q < repeat; q++) { // repeat
+      for (int ki=0; ki < repeat; ki++) {
 
-      weight_t kbest_loss = 0.0; // test-k-best
+      score_t kbest_loss = 0.0; // test-k-best
       SparseVector<weight_t> lambdas_copy; // for l1 regularization
       SparseVector<weight_t> sum_up; // for pclr
       if (l1naive||l1clip||l1cumul) lambdas_copy = lambdas;
 
       for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
            it != pairs.end(); it++) {
-
-        /*if (repeat > 1) {
-          double x = max(0.0, -1.0 * (lambdas.dot(it->first.f) - lambdas.dot(it->second.f))); 
-          kbest_loss += x;
-        }*/
-
         score_t model_diff = it->first.model - it->second.model;
+        if (repeat > 1) {
+          model_diff = lambdas.dot(it->first.f) - lambdas.dot(it->second.f);
+          kbest_loss += max(0.0, -1.0 * model_diff);
+        }
         bool rank_error = false;
         score_t margin;
         if (faster_perceptron) { // we only consider misranked pairs
@@ -442,7 +438,7 @@ main(int argc, char** argv)
           margin = fabs(model_diff);
           if (!rank_error && margin < loss_margin) margin_violations++;
         }
-        if (rank_error) rank_errors++;
+        if (rank_error && ki==1) rank_errors++;
         if (scale_bleu_diff) eta = it->first.score - it->second.score;
         if (rank_error || margin < loss_margin) {
           SparseVector<weight_t> diff_vec = it->first.f - it->second.f;
@@ -524,12 +520,27 @@ main(int argc, char** argv)
         }
       }
 
-      //if (q==0)  {  kbest_loss_first = kbest_loss; }
-      //if (q==repeat-1) {  kbest_loss_last = kbest_loss; }
-//}//repeat
-//if((kbest_loss_first - kbest_loss_last) > 0) did_improve++;
+      if (ki==0) kbest_loss_first = kbest_loss;
+      if (ki==repeat-1) { // done
+        kbest_loss_last = kbest_loss;
+        score_t best_score = -1.;
+        score_t best_model = -std::numeric_limits<score_t>::max();
+        unsigned best_idx;
+        for (unsigned i=0; i < samples->size(); i++) {
+          score_t s = lambdas.dot((*samples)[i].f);
+          if (s > best_model) {
+            best_idx = i;
+            best_model = s;
+          }
+        }
+        score_sum += (*samples)[best_idx].score;
+        model_sum += best_model;
+      }
+    } // repeat
 
-    }
+    if ((kbest_loss_first - kbest_loss_last) >= 0) kbest_loss_improve++;
+
+    } // noup
 
     if (rescale) lambdas /= lambdas.l2norm();
 
@@ -539,7 +550,6 @@ main(int argc, char** argv)
 
   if (t == 0) in_sz = ii; // remember size of input (# lines)
 
-  //if (repeat > 1) cout << "did improve? " << did_improve << " out of " << in_sz << endl; 
 
   if (batch) {
     lambdas.plus_eq_v_times_s(batch_updates, eta);
@@ -577,14 +587,16 @@ main(int argc, char** argv)
     cerr << _np << " 1best avg model score: " << model_avg;
     cerr << _p << " (" << model_diff << ")" << endl;
     cerr << "           avg # pairs: ";
-    cerr << _np << npairs/(float)in_sz;
+    cerr << _np << npairs/(float)in_sz << endl;
+    cerr << "     avg # margin viol: ";
+    cerr << margin_violations/(float)in_sz << endl;
+    cerr << "        avg # rank err: ";
+    cerr << rank_errors/(float)in_sz;
     if (faster_perceptron) cerr << " (meaningless)";
     cerr << endl;
-    cerr << "        avg # rank err: ";
-    cerr << rank_errors/(float)in_sz << endl;
     if (batch) cerr << "            batch loss: " << batch_loss << endl;
-    cerr << "     avg # margin viol: ";
-    cerr << margin_violations/(float)in_sz << endl;
+    if (repeat > 1) cerr << "       k-best loss imp: " << ((float)kbest_loss_improve/in_sz)*100 << "%" << endl;
+
     cerr << "    non0 feature count: " <<  nonz << endl;
     cerr << "           avg list sz: " << list_sz/(float)in_sz << endl;
     cerr << "           avg f count: " << f_count/(float)list_sz << endl;
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 4d096dfb..ef022469 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -11,11 +11,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
 stop_after=10 # stop epoch after 10 inputs
 
 # interesting stuff
-epochs=100                 # run over input 3 times
+epochs=3                 # run over input 3 times
 k=100                    # use 100best lists
 N=4                      # optimize (approx) BLEU4
 scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=0.0001       # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
+learning_rate=0.0001     # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
 gamma=0                  # use SVM reg
 sample_from=kbest        # use kbest lists (as opposed to forest)
 filter=uniq              # only unique entries in kbest (surface form)
@@ -23,3 +23,5 @@ pair_sampling=XYX        #
 hi_lo=0.1                # 10 vs 80 vs 10 and 80 vs 10 here
 pair_threshold=0         # minimum distance in BLEU (here: > 0)
 loss_margin=0            # update if correctly ranked, but within this margin
+repeat=1                 # repeat optimization over a kbest list this number of times
+#batch=true              # batch tuning, update after accumulating over all sentences and all kbest lists
-- 
cgit v1.2.3
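
The repeat loop above revisits the same pairs several times, recomputing model score differences from the current weights instead of the decoder's scores. As a sketch of that control flow for the plain perceptron case, with illustrative names and std::map in place of SparseVector<weight_t>; it returns the signal the kbest_loss_improve counter records:

    #include <algorithm>
    #include <map>
    #include <utility>
    #include <vector>

    typedef std::map<int, double> Vec; // feature id -> value

    // Sparse dot product of a weight vector with a feature vector.
    double dot(const Vec& w, const Vec& f) {
      double s = 0.0;
      for (Vec::const_iterator it = f.begin(); it != f.end(); ++it) {
        Vec::const_iterator jt = w.find(it->first);
        if (jt != w.end()) s += it->second * jt->second;
      }
      return s;
    }

    // Revisit the same (better, worse) feature-vector pairs `repeat` times;
    // returns true if the k-best hinge loss did not increase from the
    // first to the last pass.
    bool repeat_updates(Vec& lambdas,
                        const std::vector<std::pair<Vec, Vec> >& pairs,
                        int repeat, double eta) {
      double loss_first = 0.0, loss_last = 0.0;
      for (int ki = 0; ki < repeat; ki++) {
        double kbest_loss = 0.0;
        for (size_t i = 0; i < pairs.size(); i++) {
          double model_diff = dot(lambdas, pairs[i].first) - dot(lambdas, pairs[i].second);
          kbest_loss += std::max(0.0, -1.0 * model_diff);
          if (model_diff <= 0.0) { // misranked: lambdas += eta * (f_better - f_worse)
            for (Vec::const_iterator it = pairs[i].first.begin(); it != pairs[i].first.end(); ++it)
              lambdas[it->first] += eta * it->second;
            for (Vec::const_iterator it = pairs[i].second.begin(); it != pairs[i].second.end(); ++it)
              lambdas[it->first] -= eta * it->second;
          }
        }
        if (ki == 0) loss_first = kbest_loss;
        if (ki == repeat - 1) loss_last = kbest_loss;
      }
      return (loss_first - loss_last) >= 0;
    }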
From 2d025c839e474045d81b7490adc8842ad427c4e1 Mon Sep 17 00:00:00 2001
From: Patrick Simianer 
Date: Tue, 12 Nov 2013 20:39:59 +0100
Subject: fix
---
 training/dtrain/dtrain.cc                         |  36 ++++---
 training/dtrain/examples/standard/dtrain.ini      |   2 +-
 training/dtrain/examples/standard/expected-output | 112 +++++++++++-----------
 3 files changed, 80 insertions(+), 70 deletions(-)
(limited to 'training/dtrain/examples')
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 441e2cd7..0a27a068 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -414,6 +414,12 @@ main(int argc, char** argv)
 
       score_t kbest_loss_first, kbest_loss_last = 0.0;
 
+      for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
+           it != pairs.end(); it++) {
+        score_t model_diff = it->first.model - it->second.model;
+        kbest_loss_first += max(0.0, -1.0 * model_diff);
+      }
+
       for (int ki=0; ki < repeat; ki++) {
 
       score_t kbest_loss = 0.0; // test-k-best
@@ -520,21 +526,22 @@ main(int argc, char** argv)
         }
       }
 
-      if (ki==0) kbest_loss_first = kbest_loss;
       if (ki==repeat-1) { // done
         kbest_loss_last = kbest_loss;
-        score_t best_score = -1.;
-        score_t best_model = -std::numeric_limits<score_t>::max();
-        unsigned best_idx;
-        for (unsigned i=0; i < samples->size(); i++) {
-          score_t s = lambdas.dot((*samples)[i].f);
-          if (s > best_model) {
-            best_idx = i;
-            best_model = s;
+        if (repeat > 1) {
+          score_t best_score = -1.;
+          score_t best_model = -std::numeric_limits<score_t>::max();
+          unsigned best_idx;
+          for (unsigned i=0; i < samples->size(); i++) {
+            score_t s = lambdas.dot((*samples)[i].f);
+            if (s > best_model) {
+              best_idx = i;
+              best_model = s;
+            }
           }
+          score_sum += (*samples)[best_idx].score;
+          model_sum += best_model;
         }
-        score_sum += (*samples)[best_idx].score;
-        model_sum += best_model;
       }
     } // repeat
 
@@ -588,15 +595,14 @@ main(int argc, char** argv)
     cerr << _p << " (" << model_diff << ")" << endl;
     cerr << "           avg # pairs: ";
     cerr << _np << npairs/(float)in_sz << endl;
-    cerr << "     avg # margin viol: ";
-    cerr << margin_violations/(float)in_sz << endl;
     cerr << "        avg # rank err: ";
     cerr << rank_errors/(float)in_sz;
     if (faster_perceptron) cerr << " (meaningless)";
     cerr << endl;
+    cerr << "     avg # margin viol: ";
+    cerr << margin_violations/(float)in_sz << endl;
     if (batch) cerr << "            batch loss: " << batch_loss << endl;
-    if (repeat > 1) cerr << "       k-best loss imp: " << ((float)kbest_loss_improve/in_sz)*100 << "%" << endl;
-
+    cerr << "       k-best loss imp: " << ((float)kbest_loss_improve/in_sz)*100 << "%" << endl;
     cerr << "    non0 feature count: " <<  nonz << endl;
     cerr << "           avg list sz: " << list_sz/(float)in_sz << endl;
     cerr << "           avg f count: " << f_count/(float)list_sz << endl;
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index ef022469..fc83f08e 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -15,7 +15,7 @@ epochs=3                 # run over input 3 times
 k=100                    # use 100best lists
 N=4                      # optimize (approx) BLEU4
 scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=0.0001     # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
+learning_rate=0.1        # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
 gamma=0                  # use SVM reg
 sample_from=kbest        # use kbest lists (as opposed to forest)
 filter=uniq              # only unique entries in kbest (surface form)
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index a35bbe6f..75f47337 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,17 +4,18 @@ Reading ./nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
   Example feature: Shape_S00000_T00000
-Seeding random number sequence to 4049211323
+Seeding random number sequence to 3751911392
 
 dtrain
 Parameters:
                        k 100
                        N 4
                        T 3
+                   batch 0
                   scorer 'fixed_stupid_bleu'
              sample from 'kbest'
                   filter 'uniq'
-           learning rate 1
+           learning rate 0.1
                    gamma 0
              loss margin 0
        faster perceptron 1
@@ -25,9 +26,9 @@ Parameters:
                   l1 reg 0 'none'
                     pclr no
                max pairs 4294967295
+                  repeat 1
                 cdec cfg './cdec.ini'
-                   input './nc-wmt11.de.gz'
-                    refs './nc-wmt11.en.gz'
+                   input './nc-wmt11.gz'
                   output '-'
               stop_after 10
 (a dot represents 10 inputs)
@@ -35,25 +36,26 @@ Iteration #1 of 3.
  . 10
 Stopping after 10 input sentences.
 WEIGHTS
-              Glue = -1100
-       WordPenalty = -82.082
-     LanguageModel = -3199.1
- LanguageModel_OOV = -192
-     PhraseModel_0 = +3128.2
-     PhraseModel_1 = -1610.2
-     PhraseModel_2 = -4336.5
-     PhraseModel_3 = +2910.3
-     PhraseModel_4 = +2523.2
-     PhraseModel_5 = +506
-     PhraseModel_6 = +1467
-       PassThrough = -387
+              Glue = -110
+       WordPenalty = -8.2082
+     LanguageModel = -319.91
+ LanguageModel_OOV = -19.2
+     PhraseModel_0 = +312.82
+     PhraseModel_1 = -161.02
+     PhraseModel_2 = -433.65
+     PhraseModel_3 = +291.03
+     PhraseModel_4 = +252.32
+     PhraseModel_5 = +50.6
+     PhraseModel_6 = +146.7
+       PassThrough = -38.7
         ---
        1best avg score: 0.16966 (+0.16966)
- 1best avg model score: 2.9874e+05 (+2.9874e+05)
-           avg # pairs: 906.3 (meaningless)
-        avg # rank err: 906.3
+ 1best avg model score: 29874 (+29874)
+           avg # pairs: 906.3
+        avg # rank err: 0 (meaningless)
      avg # margin viol: 0
-    non0 feature count: 825
+       k-best loss imp: 100%
+    non0 feature count: 832
            avg list sz: 91.3
            avg f count: 139.77
 (time 0.35 min, 2.1 s/S)
@@ -61,25 +63,26 @@ WEIGHTS
 Iteration #2 of 3.
  . 10
 WEIGHTS
-              Glue = -1221
-       WordPenalty = +836.89
-     LanguageModel = +2332.3
- LanguageModel_OOV = -1451
-     PhraseModel_0 = +1507.2
-     PhraseModel_1 = -2728.4
-     PhraseModel_2 = -4183.6
-     PhraseModel_3 = +1816.3
-     PhraseModel_4 = -2894.7
-     PhraseModel_5 = +1403
-     PhraseModel_6 = +35
-       PassThrough = -1097
+              Glue = -122.1
+       WordPenalty = +83.689
+     LanguageModel = +233.23
+ LanguageModel_OOV = -145.1
+     PhraseModel_0 = +150.72
+     PhraseModel_1 = -272.84
+     PhraseModel_2 = -418.36
+     PhraseModel_3 = +181.63
+     PhraseModel_4 = -289.47
+     PhraseModel_5 = +140.3
+     PhraseModel_6 = +3.5
+       PassThrough = -109.7
         ---
        1best avg score: 0.17399 (+0.004325)
- 1best avg model score: 49369 (-2.4937e+05)
-           avg # pairs: 662.4 (meaningless)
-        avg # rank err: 662.4
+ 1best avg model score: 4936.9 (-24937)
+           avg # pairs: 662.4
+        avg # rank err: 0 (meaningless)
      avg # margin viol: 0
-    non0 feature count: 1235
+       k-best loss imp: 100%
+    non0 feature count: 1240
            avg list sz: 91.3
            avg f count: 125.11
 (time 0.27 min, 1.6 s/S)
@@ -87,32 +90,33 @@ WEIGHTS
 Iteration #3 of 3.
  . 10
 WEIGHTS
-              Glue = -1574
-       WordPenalty = -17.372
-     LanguageModel = +6861.8
- LanguageModel_OOV = -3997
-     PhraseModel_0 = -398.76
-     PhraseModel_1 = -3419.6
-     PhraseModel_2 = -3186.7
-     PhraseModel_3 = +1050.8
-     PhraseModel_4 = -2902.7
-     PhraseModel_5 = -486
-     PhraseModel_6 = -436
-       PassThrough = -2985
+              Glue = -157.4
+       WordPenalty = -1.7372
+     LanguageModel = +686.18
+ LanguageModel_OOV = -399.7
+     PhraseModel_0 = -39.876
+     PhraseModel_1 = -341.96
+     PhraseModel_2 = -318.67
+     PhraseModel_3 = +105.08
+     PhraseModel_4 = -290.27
+     PhraseModel_5 = -48.6
+     PhraseModel_6 = -43.6
+       PassThrough = -298.5
         ---
        1best avg score: 0.30742 (+0.13343)
- 1best avg model score: -1.5393e+05 (-2.0329e+05)
-           avg # pairs: 623.8 (meaningless)
-        avg # rank err: 623.8
+ 1best avg model score: -15393 (-20329)
+           avg # pairs: 623.8
+        avg # rank err: 0 (meaningless)
      avg # margin viol: 0
-    non0 feature count: 1770
+       k-best loss imp: 100%
+    non0 feature count: 1776
            avg list sz: 91.3
            avg f count: 118.58
-(time 0.25 min, 1.5 s/S)
+(time 0.28 min, 1.7 s/S)
 
 Writing weights file to '-' ...
 done
 
 ---
 Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.30742].
-This took 0.86667 min.
+This took 0.9 min.
-- 
cgit v1.2.3
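
The fix above concerns kbest_loss_first, which the previous commit only set inside the repeat loop and left uninitialized when repeat was 1: the baseline hinge loss is now accumulated from the decoder's model scores before any pass can change the weights, and later passes are compared against it. A sketch of that baseline computation, assuming pair_scores holds the (better, worse) model score of each pair; names are illustrative:

    #include <algorithm>
    #include <utility>
    #include <vector>

    // Baseline k-best hinge loss, computed from the decoder-time model
    // scores of each (better, worse) pair before any weight update.
    double initial_kbest_loss(const std::vector<std::pair<double, double> >& pair_scores) {
      double loss = 0.0; // plays the role of kbest_loss_first above
      for (size_t i = 0; i < pair_scores.size(); i++) {
        double model_diff = pair_scores[i].first - pair_scores[i].second;
        loss += std::max(0.0, -1.0 * model_diff);
      }
      return loss;
    }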