From 12771b3f0e18274db5cc634f06b94fb1597464df Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Thu, 25 Apr 2013 10:16:39 +0200
Subject: parallellized example
---
training/dtrain/examples/parallelized/cdec.ini | 2 +-
training/dtrain/examples/parallelized/work/out.0.0 | 9 +++++----
training/dtrain/examples/parallelized/work/out.0.1 | 9 +++++----
training/dtrain/examples/parallelized/work/out.1.0 | 9 +++++----
training/dtrain/examples/parallelized/work/out.1.1 | 9 +++++----
5 files changed, 21 insertions(+), 17 deletions(-)
(limited to 'training/dtrain/examples')
diff --git a/training/dtrain/examples/parallelized/cdec.ini b/training/dtrain/examples/parallelized/cdec.ini
index e43ba1c4..5773029a 100644
--- a/training/dtrain/examples/parallelized/cdec.ini
+++ b/training/dtrain/examples/parallelized/cdec.ini
@@ -4,7 +4,7 @@ intersection_strategy=cube_pruning
cubepruning_pop_limit=200
scfg_max_span_limit=15
feature_function=WordPenalty
-feature_function=KLanguageModel ../example/nc-wmt11.en.srilm.gz
+feature_function=KLanguageModel ../standard//nc-wmt11.en.srilm.gz
#feature_function=ArityPenalty
#feature_function=CMR2008ReorderingFeatures
#feature_function=Dwarf
diff --git a/training/dtrain/examples/parallelized/work/out.0.0 b/training/dtrain/examples/parallelized/work/out.0.0
index 7a00ed0f..c559dd4d 100644
--- a/training/dtrain/examples/parallelized/work/out.0.0
+++ b/training/dtrain/examples/parallelized/work/out.0.0
@@ -1,9 +1,9 @@
cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard//nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 3121929377
+Seeding random number sequence to 405292278
dtrain
Parameters:
@@ -16,6 +16,7 @@ Parameters:
learning rate 0.0001
gamma 0
loss margin 1
+ faster perceptron 0
pairs 'XYX'
hi lo 0.1
pair threshold 0
@@ -51,11 +52,11 @@ WEIGHTS
non0 feature count: 12
avg list sz: 100
avg f count: 11.32
-(time 0.37 min, 4.4 s/S)
+(time 0.35 min, 4.2 s/S)
Writing weights file to 'work/weights.0.0' ...
done
---
Best iteration: 1 [SCORE 'stupid_bleu'=0.17521].
-This took 0.36667 min.
+This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.0.1 b/training/dtrain/examples/parallelized/work/out.0.1
index e2bd6649..8bc7ea9c 100644
--- a/training/dtrain/examples/parallelized/work/out.0.1
+++ b/training/dtrain/examples/parallelized/work/out.0.1
@@ -1,9 +1,9 @@
cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard//nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 2767202922
+Seeding random number sequence to 43859692
dtrain
Parameters:
@@ -16,6 +16,7 @@ Parameters:
learning rate 0.0001
gamma 0
loss margin 1
+ faster perceptron 0
pairs 'XYX'
hi lo 0.1
pair threshold 0
@@ -52,11 +53,11 @@ WEIGHTS
non0 feature count: 12
avg list sz: 100
avg f count: 10.496
-(time 0.32 min, 3.8 s/S)
+(time 0.35 min, 4.2 s/S)
Writing weights file to 'work/weights.0.1' ...
done
---
Best iteration: 1 [SCORE 'stupid_bleu'=0.26638].
-This took 0.31667 min.
+This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.0 b/training/dtrain/examples/parallelized/work/out.1.0
index 6e790e38..65d1e7dc 100644
--- a/training/dtrain/examples/parallelized/work/out.1.0
+++ b/training/dtrain/examples/parallelized/work/out.1.0
@@ -1,9 +1,9 @@
cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard//nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 1432415010
+Seeding random number sequence to 4126799437
dtrain
Parameters:
@@ -16,6 +16,7 @@ Parameters:
learning rate 0.0001
gamma 0
loss margin 1
+ faster perceptron 0
pairs 'XYX'
hi lo 0.1
pair threshold 0
@@ -51,11 +52,11 @@ WEIGHTS
non0 feature count: 11
avg list sz: 100
avg f count: 11.814
-(time 0.45 min, 5.4 s/S)
+(time 0.43 min, 5.2 s/S)
Writing weights file to 'work/weights.1.0' ...
done
---
Best iteration: 1 [SCORE 'stupid_bleu'=0.10863].
-This took 0.45 min.
+This took 0.43333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.1 b/training/dtrain/examples/parallelized/work/out.1.1
index 0b984761..f479fbbc 100644
--- a/training/dtrain/examples/parallelized/work/out.1.1
+++ b/training/dtrain/examples/parallelized/work/out.1.1
@@ -1,9 +1,9 @@
cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard//nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 1771918374
+Seeding random number sequence to 2112412848
dtrain
Parameters:
@@ -16,6 +16,7 @@ Parameters:
learning rate 0.0001
gamma 0
loss margin 1
+ faster perceptron 0
pairs 'XYX'
hi lo 0.1
pair threshold 0
@@ -52,11 +53,11 @@ WEIGHTS
non0 feature count: 12
avg list sz: 100
avg f count: 11.224
-(time 0.42 min, 5 s/S)
+(time 0.45 min, 5.4 s/S)
Writing weights file to 'work/weights.1.1' ...
done
---
Best iteration: 1 [SCORE 'stupid_bleu'=0.13169].
-This took 0.41667 min.
+This took 0.45 min.
--
cgit v1.2.3
From b6754386f1109b960b05cdf2eabbc97bdd38e8df Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Mon, 29 Apr 2013 15:24:39 +0200
Subject: fix, cleaned up headers
---
training/dtrain/dtrain.cc | 28 +++++---
training/dtrain/dtrain.h | 74 ++++++++++++++++----
training/dtrain/examples/standard/dtrain.ini | 24 +++----
training/dtrain/examples/standard/expected-output | 84 +++++++++++------------
training/dtrain/kbestget.h | 66 +-----------------
training/dtrain/ksampler.h | 5 +-
training/dtrain/score.h | 17 +++--
7 files changed, 144 insertions(+), 154 deletions(-)
(limited to 'training/dtrain/examples')
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 149f87d4..83e4e440 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -1,4 +1,10 @@
#include "dtrain.h"
+#include "score.h"
+#include "kbestget.h"
+#include "ksampler.h"
+#include "pairsampling.h"
+
+using namespace dtrain;
bool
@@ -138,23 +144,23 @@ main(int argc, char** argv)
string scorer_str = cfg["scorer"].as
Date: Thu, 2 May 2013 10:04:07 +0200
Subject: updated example
---
training/dtrain/examples/standard/expected-output | 82 +++++++++++------------
1 file changed, 41 insertions(+), 41 deletions(-)
(limited to 'training/dtrain/examples')
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index 9a25062b..21f91244 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,7 +4,7 @@ Reading ./nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
Example feature: Shape_S00000_T00000
-Seeding random number sequence to 1677737427
+Seeding random number sequence to 970626287
dtrain
Parameters:
@@ -34,58 +34,58 @@ Iteration #1 of 2.
. 10
Stopping after 10 input sentences.
WEIGHTS
- Glue = -1155
- WordPenalty = -329.63
- LanguageModel = +3903
- LanguageModel_OOV = -1630
- PhraseModel_0 = +2746.9
- PhraseModel_1 = +1200.3
- PhraseModel_2 = -1004.1
- PhraseModel_3 = +2223.1
- PhraseModel_4 = +551.58
- PhraseModel_5 = +217
- PhraseModel_6 = +1816
- PassThrough = -1603
+ Glue = -614
+ WordPenalty = +1256.8
+ LanguageModel = +5610.5
+ LanguageModel_OOV = -1449
+ PhraseModel_0 = -2107
+ PhraseModel_1 = -4666.1
+ PhraseModel_2 = -2713.5
+ PhraseModel_3 = +4204.3
+ PhraseModel_4 = -1435.8
+ PhraseModel_5 = +916
+ PhraseModel_6 = +190
+ PassThrough = -2527
---
- 1best avg score: 0.19344 (+0.19344)
- 1best avg model score: 81387 (+81387)
- avg # pairs: 616.3 (meaningless)
- avg # rank err: 616.3
+ 1best avg score: 0.17874 (+0.17874)
+ 1best avg model score: 88399 (+88399)
+ avg # pairs: 798.2 (meaningless)
+ avg # rank err: 798.2
avg # margin viol: 0
- non0 feature count: 673
- avg list sz: 90.9
- avg f count: 104.26
-(time 0.38 min, 2.3 s/S)
+ non0 feature count: 887
+ avg list sz: 91.3
+ avg f count: 126.85
+(time 0.33 min, 2 s/S)
Iteration #2 of 2.
. 10
WEIGHTS
- Glue = -994
- WordPenalty = -778.69
- LanguageModel = +2348.9
- LanguageModel_OOV = -1967
- PhraseModel_0 = -412.72
- PhraseModel_1 = +1428.9
- PhraseModel_2 = +1967.4
- PhraseModel_3 = -944.99
- PhraseModel_4 = -239.7
- PhraseModel_5 = +708
- PhraseModel_6 = +645
- PassThrough = -1866
+ Glue = -1025
+ WordPenalty = +1751.5
+ LanguageModel = +10059
+ LanguageModel_OOV = -4490
+ PhraseModel_0 = -2640.7
+ PhraseModel_1 = -3757.4
+ PhraseModel_2 = -1133.1
+ PhraseModel_3 = +1837.3
+ PhraseModel_4 = -3534.3
+ PhraseModel_5 = +2308
+ PhraseModel_6 = +1677
+ PassThrough = -6222
---
- 1best avg score: 0.22395 (+0.03051)
- 1best avg model score: -31388 (-1.1278e+05)
- avg # pairs: 702.3 (meaningless)
- avg # rank err: 702.3
+ 1best avg score: 0.30764 (+0.12891)
+ 1best avg model score: -2.5042e+05 (-3.3882e+05)
+ avg # pairs: 725.9 (meaningless)
+ avg # rank err: 725.9
avg # margin viol: 0
- non0 feature count: 955
+ non0 feature count: 1499
avg list sz: 91.3
- avg f count: 103.45
+ avg f count: 114.34
(time 0.32 min, 1.9 s/S)
Writing weights file to '-' ...
done
---
-Best iteration: 2 [SCORE 'fixed_stupid_bleu'=0.22395].
-This took 0.7 min.
+Best iteration: 2 [SCORE 'fixed_stupid_bleu'=0.30764].
+This took 0.65 min.
--
cgit v1.2.3
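
The expected-output file updated above is the reference log for the standard example; the patches change only scores, weights, seeds, and timings, and the seeds and per-sentence timings naturally differ from run to run. A minimal sketch of how one might regenerate the log and compare it against expected-output follows; the location of the dtrain binary and the -c flag for passing the .ini file are assumptions, not taken from these patches.

```sh
# Sketch only: binary path and -c flag are assumed, adjust to your build.
cd training/dtrain/examples/standard
../../dtrain -c dtrain.ini 2>&1 | tee output
# Seeds and timings vary per run, so the diff will not be empty;
# compare the score and weight lines rather than expecting an exact match.
diff -u expected-output output | less
```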
From 5c4ef3f9206fd7e1ddfe252132582d854d62f0a4 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Mon, 24 Jun 2013 17:45:32 +0200
Subject: documentation
---
training/dtrain/README.md | 11 +++++++++++
training/dtrain/examples/parallelized/dtrain.ini | 2 --
2 files changed, 11 insertions(+), 2 deletions(-)
(limited to 'training/dtrain/examples')
diff --git a/training/dtrain/README.md b/training/dtrain/README.md
index 2ab2f232..2bae6b48 100644
--- a/training/dtrain/README.md
+++ b/training/dtrain/README.md
@@ -17,6 +17,17 @@ To build only parts needed for dtrain do
cd training/dtrain/; make
```
+Ideas
+-----
+ * get approx_bleu to work?
+ * implement minibatches (Minibatch and Parallelization for Online Large Margin Structured Learning)
+ * learning rate 1/T?
+ * use an oracle? mira-like (model vs. BLEU), feature repr. of reference!?
+ * implement lc_bleu properly
+ * merge kbest lists of previous epochs (as MERT does)
+ * ``walk entire regularization path''
+ * rerank after each update?
+
Running
-------
See directories under test/ .
diff --git a/training/dtrain/examples/parallelized/dtrain.ini b/training/dtrain/examples/parallelized/dtrain.ini
index f19ef891..0b0932d6 100644
--- a/training/dtrain/examples/parallelized/dtrain.ini
+++ b/training/dtrain/examples/parallelized/dtrain.ini
@@ -11,6 +11,4 @@ pair_sampling=XYX
hi_lo=0.1
select_weights=last
print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-# newer version of the grammar extractor use different feature names:
-#print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
decoder_config=cdec.ini
--
cgit v1.2.3
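
For the parallelized example touched throughout this series, the work/ directory holds one log and one weights file per shard and iteration (out.SHARD.ITERATION and weights.SHARD.ITERATION, e.g. out.0.1 and weights.0.1 in the diffs above). A small sketch for inspecting the results once the example has been run; the file names come from the diffs, everything else is an assumption.

```sh
# Sketch only: assumes the parallelized example has already been run
# and written its per-shard output under work/.
cd training/dtrain/examples/parallelized
# Each log ends with its "Best iteration" line and total time.
tail -n 3 work/out.*.*
# Per-shard weights written after each iteration.
ls -l work/weights.*.*
```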