Diffstat (limited to 'training/dtrain/examples')
-rw-r--r--  training/dtrain/examples/parallelized/README | 4
-rw-r--r--  training/dtrain/examples/parallelized/cdec.ini | 2
-rw-r--r--  training/dtrain/examples/parallelized/dtrain.ini | 13
-rw-r--r--  training/dtrain/examples/parallelized/in | 20
-rw-r--r--  training/dtrain/examples/parallelized/refs | 10
-rw-r--r--  training/dtrain/examples/parallelized/work/out.0.0 | 71
-rw-r--r--  training/dtrain/examples/parallelized/work/out.0.1 | 71
-rw-r--r--  training/dtrain/examples/parallelized/work/out.0.2 | 44
-rw-r--r--  training/dtrain/examples/parallelized/work/out.1.0 | 71
-rw-r--r--  training/dtrain/examples/parallelized/work/out.1.1 | 71
-rw-r--r--  training/dtrain/examples/parallelized/work/out.1.2 | 44
-rw-r--r--  training/dtrain/examples/parallelized/work/out.2.0 | 43
-rw-r--r--  training/dtrain/examples/parallelized/work/out.2.1 | 44
-rw-r--r--  training/dtrain/examples/parallelized/work/out.2.2 | 44
-rw-r--r--  training/dtrain/examples/parallelized/work/out.3.0 | 43
-rw-r--r--  training/dtrain/examples/parallelized/work/out.3.1 | 44
-rw-r--r--  training/dtrain/examples/parallelized/work/out.3.2 | 44
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.0.0.in | 8
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.0.0.refs | 5
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.1.0.in | 8
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.1.0.refs | 5
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.2.0.in | 3
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.3.0.in | 1
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.0 | 24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.0.0 | 23
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.0.1 | 24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.0.2 | 12
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.1 | 24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.1.0 | 23
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.1.1 | 24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.1.2 | 12
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.2 | 12
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.2.0 | 11
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.2.1 | 12
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.2.2 | 12
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.3.0 | 12
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.3.1 | 12
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.3.2 | 12
-rw-r--r--  training/dtrain/examples/standard/dtrain.ini | 29
-rw-r--r--  training/dtrain/examples/standard/expected-output | 123
-rw-r--r--  training/dtrain/examples/standard/expected-output.gz | bin 0 -> 21565 bytes
-rw-r--r--  training/dtrain/examples/standard/nc-wmt11.de.gz | bin 58324 -> 0 bytes
-rw-r--r--  training/dtrain/examples/standard/nc-wmt11.en.gz | bin 49600 -> 0 bytes
-rw-r--r--  training/dtrain/examples/toy/dtrain.ini | 13
-rw-r--r--  training/dtrain/examples/toy/expected-output | 88
-rw-r--r--  training/dtrain/examples/toy/in | 2
-rw-r--r--  training/dtrain/examples/toy/src | 2
-rw-r--r--  training/dtrain/examples/toy/tgt | 2
-rw-r--r--  training/dtrain/examples/toy/weights | 4
49 files changed, 711 insertions, 514 deletions
diff --git a/training/dtrain/examples/parallelized/README b/training/dtrain/examples/parallelized/README
index 89715105..c4addd81 100644
--- a/training/dtrain/examples/parallelized/README
+++ b/training/dtrain/examples/parallelized/README
@@ -1,5 +1,5 @@
run for example
- ../../parallelize.rb ./dtrain.ini 4 false 2 2 ./in ./refs
+ ../../parallelize.rb -c dtrain.ini -s 4 -e 3 -d ../../dtrain -p 2 -i in
-final weights will be in the file work/weights.3
+final weights will be in the file work/weights.2
diff --git a/training/dtrain/examples/parallelized/cdec.ini b/training/dtrain/examples/parallelized/cdec.ini
index 5773029a..733b1653 100644
--- a/training/dtrain/examples/parallelized/cdec.ini
+++ b/training/dtrain/examples/parallelized/cdec.ini
@@ -4,7 +4,7 @@ intersection_strategy=cube_pruning
cubepruning_pop_limit=200
scfg_max_span_limit=15
feature_function=WordPenalty
-feature_function=KLanguageModel ../standard//nc-wmt11.en.srilm.gz
+feature_function=KLanguageModel ../standard/nc-wmt11.en.srilm.gz
#feature_function=ArityPenalty
#feature_function=CMR2008ReorderingFeatures
#feature_function=Dwarf
diff --git a/training/dtrain/examples/parallelized/dtrain.ini b/training/dtrain/examples/parallelized/dtrain.ini
index 0b0932d6..9fc205a3 100644
--- a/training/dtrain/examples/parallelized/dtrain.ini
+++ b/training/dtrain/examples/parallelized/dtrain.ini
@@ -1,14 +1,7 @@
k=100
N=4
learning_rate=0.0001
-gamma=0
-loss_margin=1.0
-epochs=1
-scorer=stupid_bleu
-sample_from=kbest
-filter=uniq
-pair_sampling=XYX
-hi_lo=0.1
-select_weights=last
-print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
+error_margin=1.0
+iterations=1
decoder_config=cdec.ini
+print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
diff --git a/training/dtrain/examples/parallelized/in b/training/dtrain/examples/parallelized/in
index 51d01fe7..82555908 100644
--- a/training/dtrain/examples/parallelized/in
+++ b/training/dtrain/examples/parallelized/in
@@ -1,10 +1,10 @@
-<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg>
-<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg>
-<seg grammar="grammar/grammar.out.2.gz" id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg>
-<seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg>
-<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg>
-<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg>
-<seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg>
-<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg>
-<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg>
-<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg>
+<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg> ||| europe 's divided racial house
+<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg> ||| a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge .
+<seg grammar="grammar/grammar.out.2.gz" id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg> ||| the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them .
+<seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg> ||| while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon .
+<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg> ||| an aging population at home and ever more open borders imply increasing racial fragmentation in european countries .
+<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg> ||| mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear .
+<seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg> ||| it will not , as america 's racial history clearly shows .
+<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg> ||| race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes .
+<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg> ||| the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths .
+<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg> ||| this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us .
diff --git a/training/dtrain/examples/parallelized/refs b/training/dtrain/examples/parallelized/refs
deleted file mode 100644
index 632e27b0..00000000
--- a/training/dtrain/examples/parallelized/refs
+++ /dev/null
@@ -1,10 +0,0 @@
-europe 's divided racial house
-a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge .
-the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them .
-while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon .
-an aging population at home and ever more open borders imply increasing racial fragmentation in european countries .
-mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear .
-it will not , as america 's racial history clearly shows .
-race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes .
-the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths .
-this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us .
diff --git a/training/dtrain/examples/parallelized/work/out.0.0 b/training/dtrain/examples/parallelized/work/out.0.0
index c559dd4d..77749404 100644
--- a/training/dtrain/examples/parallelized/work/out.0.0
+++ b/training/dtrain/examples/parallelized/work/out.0.0
@@ -1,62 +1,43 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
+Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 405292278
-
dtrain
Parameters:
k 100
N 4
T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
input 'work/shard.0.0.in'
- refs 'work/shard.0.0.refs'
output 'work/weights.0.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 5
+ .... 3
WEIGHTS
- Glue = +0.2663
- WordPenalty = -0.0079042
- LanguageModel = +0.44782
- LanguageModel_OOV = -0.0401
- PhraseModel_0 = -0.193
- PhraseModel_1 = +0.71321
- PhraseModel_2 = +0.85196
- PhraseModel_3 = -0.43986
- PhraseModel_4 = -0.44803
- PhraseModel_5 = -0.0538
- PhraseModel_6 = -0.1788
- PassThrough = -0.1477
+ Glue = +0.3404
+ WordPenalty = -0.017632
+ LanguageModel = +0.72958
+ LanguageModel_OOV = -0.235
+ PhraseModel_0 = -0.43721
+ PhraseModel_1 = +1.01
+ PhraseModel_2 = +1.3525
+ PhraseModel_3 = -0.25541
+ PhraseModel_4 = -0.78115
+ PhraseModel_5 = +0
+ PhraseModel_6 = -0.3681
+ PassThrough = -0.3304
---
- 1best avg score: 0.17521 (+0.17521)
- 1best avg model score: 21.556 (+21.556)
- avg # pairs: 1671.2
- avg # rank err: 1118.6
- avg # margin viol: 552.6
- non0 feature count: 12
+ 1best avg score: 0.19474 (+0.19474)
+ 1best avg model score: 0.52232
+ avg # pairs: 2513
+ non-0 feature count: 11
avg list sz: 100
- avg f count: 11.32
-(time 0.35 min, 4.2 s/S)
-
-Writing weights file to 'work/weights.0.0' ...
-done
+ avg f count: 11.42
+(time 0.32 min, 6 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.17521].
-This took 0.35 min.
+Best iteration: 1 [GOLD = 0.19474].
+This took 0.31667 min.
diff --git a/training/dtrain/examples/parallelized/work/out.0.1 b/training/dtrain/examples/parallelized/work/out.0.1
index 8bc7ea9c..d0dee623 100644
--- a/training/dtrain/examples/parallelized/work/out.0.1
+++ b/training/dtrain/examples/parallelized/work/out.0.1
@@ -1,63 +1,44 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
+Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 43859692
-
dtrain
Parameters:
k 100
N 4
T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
input 'work/shard.0.0.in'
- refs 'work/shard.0.0.refs'
output 'work/weights.0.1'
weights in 'work/weights.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 5
+ .... 3
WEIGHTS
- Glue = -0.2699
- WordPenalty = +0.080605
- LanguageModel = -0.026572
- LanguageModel_OOV = -0.30025
- PhraseModel_0 = -0.32076
- PhraseModel_1 = +0.67451
- PhraseModel_2 = +0.92
- PhraseModel_3 = -0.36402
- PhraseModel_4 = -0.592
- PhraseModel_5 = -0.0269
- PhraseModel_6 = -0.28755
- PassThrough = -0.33285
+ Glue = -0.40908
+ WordPenalty = +0.12967
+ LanguageModel = +0.39892
+ LanguageModel_OOV = -0.6314
+ PhraseModel_0 = -0.63992
+ PhraseModel_1 = +0.74198
+ PhraseModel_2 = +1.3096
+ PhraseModel_3 = -0.1216
+ PhraseModel_4 = -1.2274
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.21093
+ PassThrough = -0.66155
---
- 1best avg score: 0.26638 (+0.26638)
- 1best avg model score: 53.197 (+53.197)
- avg # pairs: 2028.6
- avg # rank err: 998.2
- avg # margin viol: 918.8
- non0 feature count: 12
+ 1best avg score: 0.15735 (+0.15735)
+ 1best avg model score: 46.831
+ avg # pairs: 2132.3
+ non-0 feature count: 12
avg list sz: 100
- avg f count: 10.496
-(time 0.35 min, 4.2 s/S)
-
-Writing weights file to 'work/weights.0.1' ...
-done
+ avg f count: 10.64
+(time 0.38 min, 7 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.26638].
-This took 0.35 min.
+Best iteration: 1 [GOLD = 0.15735].
+This took 0.38333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.0.2 b/training/dtrain/examples/parallelized/work/out.0.2
new file mode 100644
index 00000000..9c4b110b
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/out.0.2
@@ -0,0 +1,44 @@
+Loading the LM will be faster if you build a binary file.
+Reading ../standard/nc-wmt11.en.srilm.gz
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+****************************************************************************************************
+dtrain
+Parameters:
+ k 100
+ N 4
+ T 1
+ learning rate 0.0001
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.0.0.in'
+ output 'work/weights.0.2'
+ weights in 'work/weights.1'
+(a dot per input)
+Iteration #1 of 1.
+ .... 3
+WEIGHTS
+ Glue = -0.44422
+ WordPenalty = +0.1032
+ LanguageModel = +0.66474
+ LanguageModel_OOV = -0.62252
+ PhraseModel_0 = -0.59993
+ PhraseModel_1 = +0.78992
+ PhraseModel_2 = +1.3149
+ PhraseModel_3 = +0.21434
+ PhraseModel_4 = -1.0174
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.18452
+ PassThrough = -0.65268
+ ---
+ 1best avg score: 0.24722 (+0.24722)
+ 1best avg model score: 61.971
+ avg # pairs: 2017.7
+ non-0 feature count: 12
+ avg list sz: 100
+ avg f count: 10.42
+(time 0.3 min, 6 s/S)
+
+---
+Best iteration: 1 [GOLD = 0.24722].
+This took 0.3 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.0 b/training/dtrain/examples/parallelized/work/out.1.0
index 65d1e7dc..3dc4dca6 100644
--- a/training/dtrain/examples/parallelized/work/out.1.0
+++ b/training/dtrain/examples/parallelized/work/out.1.0
@@ -1,62 +1,43 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
+Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 4126799437
-
dtrain
Parameters:
k 100
N 4
T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
input 'work/shard.1.0.in'
- refs 'work/shard.1.0.refs'
output 'work/weights.1.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 5
+ .... 3
WEIGHTS
- Glue = -0.3815
- WordPenalty = +0.20064
- LanguageModel = +0.95304
- LanguageModel_OOV = -0.264
- PhraseModel_0 = -0.22362
- PhraseModel_1 = +0.12254
- PhraseModel_2 = +0.26328
- PhraseModel_3 = +0.38018
- PhraseModel_4 = -0.48654
- PhraseModel_5 = +0
- PhraseModel_6 = -0.3645
- PassThrough = -0.2216
+ Glue = -0.2722
+ WordPenalty = +0.05433
+ LanguageModel = +0.69948
+ LanguageModel_OOV = -0.2641
+ PhraseModel_0 = -1.4208
+ PhraseModel_1 = -1.563
+ PhraseModel_2 = -0.21051
+ PhraseModel_3 = -0.17764
+ PhraseModel_4 = -1.6583
+ PhraseModel_5 = +0.0794
+ PhraseModel_6 = +0.1528
+ PassThrough = -0.2367
---
- 1best avg score: 0.10863 (+0.10863)
- 1best avg model score: -4.9841 (-4.9841)
- avg # pairs: 1345.4
- avg # rank err: 822.4
- avg # margin viol: 501
- non0 feature count: 11
+ 1best avg score: 0.071329 (+0.071329)
+ 1best avg model score: -41.362
+ avg # pairs: 1862.3
+ non-0 feature count: 12
avg list sz: 100
- avg f count: 11.814
-(time 0.43 min, 5.2 s/S)
-
-Writing weights file to 'work/weights.1.0' ...
-done
+ avg f count: 11.847
+(time 0.28 min, 5 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.10863].
-This took 0.43333 min.
+Best iteration: 1 [GOLD = 0.071329].
+This took 0.28333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.1 b/training/dtrain/examples/parallelized/work/out.1.1
index f479fbbc..79ac35dc 100644
--- a/training/dtrain/examples/parallelized/work/out.1.1
+++ b/training/dtrain/examples/parallelized/work/out.1.1
@@ -1,63 +1,44 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
+Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 2112412848
-
dtrain
Parameters:
k 100
N 4
T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
input 'work/shard.1.0.in'
- refs 'work/shard.1.0.refs'
output 'work/weights.1.1'
weights in 'work/weights.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 5
+ .... 3
WEIGHTS
- Glue = -0.3178
- WordPenalty = +0.11092
- LanguageModel = +0.17269
- LanguageModel_OOV = -0.13485
- PhraseModel_0 = -0.45371
- PhraseModel_1 = +0.38789
- PhraseModel_2 = +0.75311
- PhraseModel_3 = -0.38163
- PhraseModel_4 = -0.58817
- PhraseModel_5 = -0.0269
- PhraseModel_6 = -0.27315
- PassThrough = -0.16745
+ Glue = -0.20488
+ WordPenalty = -0.0091745
+ LanguageModel = +0.79433
+ LanguageModel_OOV = -0.4309
+ PhraseModel_0 = -0.56242
+ PhraseModel_1 = +0.85363
+ PhraseModel_2 = +1.3458
+ PhraseModel_3 = -0.13095
+ PhraseModel_4 = -0.94762
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.16003
+ PassThrough = -0.46105
---
- 1best avg score: 0.13169 (+0.13169)
- 1best avg model score: 24.226 (+24.226)
- avg # pairs: 1951.2
- avg # rank err: 985.4
- avg # margin viol: 951
- non0 feature count: 12
+ 1best avg score: 0.13017 (+0.13017)
+ 1best avg model score: 14.53
+ avg # pairs: 1968
+ non-0 feature count: 12
avg list sz: 100
- avg f count: 11.224
-(time 0.45 min, 5.4 s/S)
-
-Writing weights file to 'work/weights.1.1' ...
-done
+ avg f count: 11
+(time 0.33 min, 6 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.13169].
-This took 0.45 min.
+Best iteration: 1 [GOLD = 0.13017].
+This took 0.33333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.2 b/training/dtrain/examples/parallelized/work/out.1.2
new file mode 100644
index 00000000..8c4f8b03
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/out.1.2
@@ -0,0 +1,44 @@
+Loading the LM will be faster if you build a binary file.
+Reading ../standard/nc-wmt11.en.srilm.gz
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+****************************************************************************************************
+dtrain
+Parameters:
+ k 100
+ N 4
+ T 1
+ learning rate 0.0001
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.1.0.in'
+ output 'work/weights.1.2'
+ weights in 'work/weights.1'
+(a dot per input)
+Iteration #1 of 1.
+ .... 3
+WEIGHTS
+ Glue = -0.49853
+ WordPenalty = +0.07636
+ LanguageModel = +1.3183
+ LanguageModel_OOV = -0.60902
+ PhraseModel_0 = -0.22481
+ PhraseModel_1 = +0.86369
+ PhraseModel_2 = +1.0747
+ PhraseModel_3 = +0.18002
+ PhraseModel_4 = -0.84661
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = +0.11247
+ PassThrough = -0.63918
+ ---
+ 1best avg score: 0.15478 (+0.15478)
+ 1best avg model score: -7.2154
+ avg # pairs: 1776
+ non-0 feature count: 12
+ avg list sz: 100
+ avg f count: 11.327
+(time 0.27 min, 5 s/S)
+
+---
+Best iteration: 1 [GOLD = 0.15478].
+This took 0.26667 min.
diff --git a/training/dtrain/examples/parallelized/work/out.2.0 b/training/dtrain/examples/parallelized/work/out.2.0
new file mode 100644
index 00000000..07c85963
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/out.2.0
@@ -0,0 +1,43 @@
+Loading the LM will be faster if you build a binary file.
+Reading ../standard/nc-wmt11.en.srilm.gz
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+****************************************************************************************************
+dtrain
+Parameters:
+ k 100
+ N 4
+ T 1
+ learning rate 0.0001
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.2.0.in'
+ output 'work/weights.2.0'
+(a dot per input)
+Iteration #1 of 1.
+ .... 3
+WEIGHTS
+ Glue = -0.2109
+ WordPenalty = +0.14922
+ LanguageModel = +0.79686
+ LanguageModel_OOV = -0.6627
+ PhraseModel_0 = +0.37999
+ PhraseModel_1 = +0.69213
+ PhraseModel_2 = +0.3422
+ PhraseModel_3 = +1.1426
+ PhraseModel_4 = -0.55413
+ PhraseModel_5 = +0
+ PhraseModel_6 = +0.0676
+ PassThrough = -0.6343
+ ---
+ 1best avg score: 0.072374 (+0.072374)
+ 1best avg model score: -27.384
+ avg # pairs: 2582
+ non-0 feature count: 11
+ avg list sz: 100
+ avg f count: 11.54
+(time 0.32 min, 6 s/S)
+
+---
+Best iteration: 1 [GOLD = 0.072374].
+This took 0.31667 min.
diff --git a/training/dtrain/examples/parallelized/work/out.2.1 b/training/dtrain/examples/parallelized/work/out.2.1
new file mode 100644
index 00000000..c54bb1b1
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/out.2.1
@@ -0,0 +1,44 @@
+Loading the LM will be faster if you build a binary file.
+Reading ../standard/nc-wmt11.en.srilm.gz
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+****************************************************************************************************
+dtrain
+Parameters:
+ k 100
+ N 4
+ T 1
+ learning rate 0.0001
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.2.0.in'
+ output 'work/weights.2.1'
+ weights in 'work/weights.0'
+(a dot per input)
+Iteration #1 of 1.
+ .... 3
+WEIGHTS
+ Glue = -0.76608
+ WordPenalty = +0.15938
+ LanguageModel = +1.5897
+ LanguageModel_OOV = -0.521
+ PhraseModel_0 = -0.58348
+ PhraseModel_1 = +0.29828
+ PhraseModel_2 = +0.78493
+ PhraseModel_3 = +0.083222
+ PhraseModel_4 = -0.93843
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.27382
+ PassThrough = -0.55115
+ ---
+ 1best avg score: 0.12881 (+0.12881)
+ 1best avg model score: -9.6731
+ avg # pairs: 2020.3
+ non-0 feature count: 12
+ avg list sz: 100
+ avg f count: 12
+(time 0.32 min, 6 s/S)
+
+---
+Best iteration: 1 [GOLD = 0.12881].
+This took 0.31667 min.
diff --git a/training/dtrain/examples/parallelized/work/out.2.2 b/training/dtrain/examples/parallelized/work/out.2.2
new file mode 100644
index 00000000..f5d6229f
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/out.2.2
@@ -0,0 +1,44 @@
+Loading the LM will be faster if you build a binary file.
+Reading ../standard/nc-wmt11.en.srilm.gz
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+****************************************************************************************************
+dtrain
+Parameters:
+ k 100
+ N 4
+ T 1
+ learning rate 0.0001
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.2.0.in'
+ output 'work/weights.2.2'
+ weights in 'work/weights.1'
+(a dot per input)
+Iteration #1 of 1.
+ .... 3
+WEIGHTS
+ Glue = -0.90863
+ WordPenalty = +0.10819
+ LanguageModel = +0.5239
+ LanguageModel_OOV = -0.41623
+ PhraseModel_0 = -0.86868
+ PhraseModel_1 = +0.40784
+ PhraseModel_2 = +1.1793
+ PhraseModel_3 = -0.24698
+ PhraseModel_4 = -1.2353
+ PhraseModel_5 = +0.03375
+ PhraseModel_6 = -0.17883
+ PassThrough = -0.44638
+ ---
+ 1best avg score: 0.12788 (+0.12788)
+ 1best avg model score: 41.302
+ avg # pairs: 2246.3
+ non-0 feature count: 12
+ avg list sz: 100
+ avg f count: 10.98
+(time 0.35 min, 7 s/S)
+
+---
+Best iteration: 1 [GOLD = 0.12788].
+This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.3.0 b/training/dtrain/examples/parallelized/work/out.3.0
new file mode 100644
index 00000000..fa499523
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/out.3.0
@@ -0,0 +1,43 @@
+Loading the LM will be faster if you build a binary file.
+Reading ../standard/nc-wmt11.en.srilm.gz
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+****************************************************************************************************
+dtrain
+Parameters:
+ k 100
+ N 4
+ T 1
+ learning rate 0.0001
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.3.0.in'
+ output 'work/weights.3.0'
+(a dot per input)
+Iteration #1 of 1.
+ .. 1
+WEIGHTS
+ Glue = -0.09
+ WordPenalty = +0.32442
+ LanguageModel = +2.5769
+ LanguageModel_OOV = -0.009
+ PhraseModel_0 = -0.58972
+ PhraseModel_1 = +0.063691
+ PhraseModel_2 = +0.5366
+ PhraseModel_3 = +0.12867
+ PhraseModel_4 = -1.9801
+ PhraseModel_5 = +0.018
+ PhraseModel_6 = -0.486
+ PassThrough = -0.09
+ ---
+ 1best avg score: 0.034204 (+0.034204)
+ 1best avg model score: 0
+ avg # pairs: 1700
+ non-0 feature count: 12
+ avg list sz: 100
+ avg f count: 10.8
+(time 0.1 min, 6 s/S)
+
+---
+Best iteration: 1 [GOLD = 0.034204].
+This took 0.1 min.
diff --git a/training/dtrain/examples/parallelized/work/out.3.1 b/training/dtrain/examples/parallelized/work/out.3.1
new file mode 100644
index 00000000..c4b3aa3c
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/out.3.1
@@ -0,0 +1,44 @@
+Loading the LM will be faster if you build a binary file.
+Reading ../standard/nc-wmt11.en.srilm.gz
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+****************************************************************************************************
+dtrain
+Parameters:
+ k 100
+ N 4
+ T 1
+ learning rate 0.0001
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.3.0.in'
+ output 'work/weights.3.1'
+ weights in 'work/weights.0'
+(a dot per input)
+Iteration #1 of 1.
+ .. 1
+WEIGHTS
+ Glue = +0.31832
+ WordPenalty = +0.11139
+ LanguageModel = +0.95438
+ LanguageModel_OOV = -0.0608
+ PhraseModel_0 = -0.98113
+ PhraseModel_1 = -0.090531
+ PhraseModel_2 = +0.79088
+ PhraseModel_3 = -0.57623
+ PhraseModel_4 = -1.4382
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.10812
+ PassThrough = -0.09095
+ ---
+ 1best avg score: 0.084989 (+0.084989)
+ 1best avg model score: -52.323
+ avg # pairs: 2487
+ non-0 feature count: 12
+ avg list sz: 100
+ avg f count: 12
+(time 0.1 min, 6 s/S)
+
+---
+Best iteration: 1 [GOLD = 0.084989].
+This took 0.1 min.
diff --git a/training/dtrain/examples/parallelized/work/out.3.2 b/training/dtrain/examples/parallelized/work/out.3.2
new file mode 100644
index 00000000..eb27dac2
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/out.3.2
@@ -0,0 +1,44 @@
+Loading the LM will be faster if you build a binary file.
+Reading ../standard/nc-wmt11.en.srilm.gz
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+****************************************************************************************************
+dtrain
+Parameters:
+ k 100
+ N 4
+ T 1
+ learning rate 0.0001
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.3.0.in'
+ output 'work/weights.3.2'
+ weights in 'work/weights.1'
+(a dot per input)
+Iteration #1 of 1.
+ .. 1
+WEIGHTS
+ Glue = -0.12993
+ WordPenalty = +0.13651
+ LanguageModel = +0.58946
+ LanguageModel_OOV = -0.48362
+ PhraseModel_0 = -0.81262
+ PhraseModel_1 = +0.44273
+ PhraseModel_2 = +1.1733
+ PhraseModel_3 = -0.1826
+ PhraseModel_4 = -1.2213
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.18823
+ PassThrough = -0.51378
+ ---
+ 1best avg score: 0.12674 (+0.12674)
+ 1best avg model score: -7.2878
+ avg # pairs: 1769
+ non-0 feature count: 12
+ avg list sz: 100
+ avg f count: 12
+(time 0.1 min, 6 s/S)
+
+---
+Best iteration: 1 [GOLD = 0.12674].
+This took 0.1 min.
diff --git a/training/dtrain/examples/parallelized/work/shard.0.0.in b/training/dtrain/examples/parallelized/work/shard.0.0.in
index 92f9c78e..a0ef6f54 100644
--- a/training/dtrain/examples/parallelized/work/shard.0.0.in
+++ b/training/dtrain/examples/parallelized/work/shard.0.0.in
@@ -1,5 +1,3 @@
-<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg>
-<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg>
-<seg grammar="grammar/grammar.out.2.gz" id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg>
-<seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg>
-<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg>
+<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg> ||| europe 's divided racial house
+<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg> ||| a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge .
+<seg grammar="grammar/grammar.out.2.gz" id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg> ||| the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them .
diff --git a/training/dtrain/examples/parallelized/work/shard.0.0.refs b/training/dtrain/examples/parallelized/work/shard.0.0.refs
deleted file mode 100644
index bef68fee..00000000
--- a/training/dtrain/examples/parallelized/work/shard.0.0.refs
+++ /dev/null
@@ -1,5 +0,0 @@
-europe 's divided racial house
-a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge .
-the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them .
-while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon .
-an aging population at home and ever more open borders imply increasing racial fragmentation in european countries .
diff --git a/training/dtrain/examples/parallelized/work/shard.1.0.in b/training/dtrain/examples/parallelized/work/shard.1.0.in
index b7695ce7..05f0273b 100644
--- a/training/dtrain/examples/parallelized/work/shard.1.0.in
+++ b/training/dtrain/examples/parallelized/work/shard.1.0.in
@@ -1,5 +1,3 @@
-<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg>
-<seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg>
-<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg>
-<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg>
-<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg>
+<seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg> ||| while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon .
+<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg> ||| an aging population at home and ever more open borders imply increasing racial fragmentation in european countries .
+<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg> ||| mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear .
diff --git a/training/dtrain/examples/parallelized/work/shard.1.0.refs b/training/dtrain/examples/parallelized/work/shard.1.0.refs
deleted file mode 100644
index 6076f6d5..00000000
--- a/training/dtrain/examples/parallelized/work/shard.1.0.refs
+++ /dev/null
@@ -1,5 +0,0 @@
-mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear .
-it will not , as america 's racial history clearly shows .
-race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes .
-the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths .
-this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us .
diff --git a/training/dtrain/examples/parallelized/work/shard.2.0.in b/training/dtrain/examples/parallelized/work/shard.2.0.in
new file mode 100644
index 00000000..0528d357
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/shard.2.0.in
@@ -0,0 +1,3 @@
+<seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg> ||| it will not , as america 's racial history clearly shows .
+<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg> ||| race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes .
+<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg> ||| the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths .
diff --git a/training/dtrain/examples/parallelized/work/shard.3.0.in b/training/dtrain/examples/parallelized/work/shard.3.0.in
new file mode 100644
index 00000000..f7cbb3e3
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/shard.3.0.in
@@ -0,0 +1 @@
+<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg> ||| this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us .
diff --git a/training/dtrain/examples/parallelized/work/weights.0 b/training/dtrain/examples/parallelized/work/weights.0
index ddd595a8..816269cd 100644
--- a/training/dtrain/examples/parallelized/work/weights.0
+++ b/training/dtrain/examples/parallelized/work/weights.0
@@ -1,12 +1,12 @@
-LanguageModel 0.7004298992212881
-PhraseModel_2 0.5576194336478857
-PhraseModel_1 0.41787318415343155
-PhraseModel_4 -0.46728502545635164
-PhraseModel_3 -0.029839521598455515
-Glue -0.05760000000000068
-PhraseModel_6 -0.2716499999999978
-PhraseModel_0 -0.20831031065605327
-LanguageModel_OOV -0.15205000000000077
-PassThrough -0.1846500000000006
-WordPenalty 0.09636994553433414
-PhraseModel_5 -0.026900000000000257
+LanguageModel 1.200704259340465
+PhraseModel_4 -1.2434381298299035
+PhraseModel_1 0.050697726409824076
+PhraseModel_0 -0.516923312932941
+PhraseModel_2 0.5051987092783867
+PhraseModel_3 0.20955092377784057
+PassThrough -0.32285
+LanguageModel_OOV -0.29269999999999996
+PhraseModel_6 -0.158425
+Glue -0.05817500000000002
+WordPenalty 0.12758486142112804
+PhraseModel_5 0.02435
diff --git a/training/dtrain/examples/parallelized/work/weights.0.0 b/training/dtrain/examples/parallelized/work/weights.0.0
index c9370b18..be386c62 100644
--- a/training/dtrain/examples/parallelized/work/weights.0.0
+++ b/training/dtrain/examples/parallelized/work/weights.0.0
@@ -1,12 +1,11 @@
-WordPenalty -0.0079041595706392243
-LanguageModel 0.44781580828279532
-LanguageModel_OOV -0.04010000000000042
-Glue 0.26629999999999948
-PhraseModel_0 -0.19299677809125185
-PhraseModel_1 0.71321026861732773
-PhraseModel_2 0.85195540993310537
-PhraseModel_3 -0.43986310822842656
-PhraseModel_4 -0.44802855630415955
-PhraseModel_5 -0.053800000000000514
-PhraseModel_6 -0.17879999999999835
-PassThrough -0.14770000000000036
+WordPenalty -0.017632355965271129
+LanguageModel 0.72957628464102753
+LanguageModel_OOV -0.23499999999999999
+PhraseModel_0 -0.43720953659541578
+PhraseModel_1 1.0100170838129212
+PhraseModel_2 1.3524984123857073
+PhraseModel_3 -0.25541132249775761
+PhraseModel_4 -0.78115161368856911
+PhraseModel_6 -0.36810000000000004
+Glue 0.34040000000000004
+PassThrough -0.33040000000000003
diff --git a/training/dtrain/examples/parallelized/work/weights.0.1 b/training/dtrain/examples/parallelized/work/weights.0.1
index 8fad3de8..d4c77d07 100644
--- a/training/dtrain/examples/parallelized/work/weights.0.1
+++ b/training/dtrain/examples/parallelized/work/weights.0.1
@@ -1,12 +1,12 @@
-WordPenalty 0.080605055841244472
-LanguageModel -0.026571720531022844
-LanguageModel_OOV -0.30024999999999141
-Glue -0.26989999999999842
-PhraseModel_2 0.92000295209089566
-PhraseModel_1 0.67450748692470841
-PhraseModel_4 -0.5920000014976784
-PhraseModel_3 -0.36402437203127397
-PhraseModel_6 -0.28754999999999603
-PhraseModel_0 -0.32076244202907672
-PassThrough -0.33284999999999004
-PhraseModel_5 -0.026900000000000257
+WordPenalty 0.12966947493426365
+LanguageModel 0.3989224621154368
+LanguageModel_OOV -0.63139999999999996
+PhraseModel_0 -0.63991953012355962
+PhraseModel_1 0.74197897612368646
+PhraseModel_2 1.3096163833051435
+PhraseModel_3 -0.12160001974680773
+PhraseModel_4 -1.2274031286515816
+PhraseModel_5 0.02435
+PhraseModel_6 -0.210925
+Glue -0.40907500000000002
+PassThrough -0.66155000000000008
diff --git a/training/dtrain/examples/parallelized/work/weights.0.2 b/training/dtrain/examples/parallelized/work/weights.0.2
new file mode 100644
index 00000000..8ce1449b
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/weights.0.2
@@ -0,0 +1,12 @@
+WordPenalty 0.10319922626226019
+LanguageModel 0.6647396869692952
+LanguageModel_OOV -0.622525
+PhraseModel_0 -0.59993441316076157
+PhraseModel_1 0.78991513935858193
+PhraseModel_2 1.3148638774685031
+PhraseModel_3 0.2143393571820455
+PhraseModel_4 -1.0173894637028262
+PhraseModel_5 0.02435
+PhraseModel_6 -0.18452499999999999
+Glue -0.44422499999999998
+PassThrough -0.65267500000000012
diff --git a/training/dtrain/examples/parallelized/work/weights.1 b/training/dtrain/examples/parallelized/work/weights.1
index 03058a16..2a00be2e 100644
--- a/training/dtrain/examples/parallelized/work/weights.1
+++ b/training/dtrain/examples/parallelized/work/weights.1
@@ -1,12 +1,12 @@
-PhraseModel_2 0.8365578543552836
-PhraseModel_4 -0.5900840266009169
-PhraseModel_1 0.5312000609786991
-PhraseModel_0 -0.3872342271319619
-PhraseModel_3 -0.3728279676912084
-Glue -0.2938500000000036
-PhraseModel_6 -0.2803499999999967
-PassThrough -0.25014999999999626
-LanguageModel_OOV -0.21754999999999702
-LanguageModel 0.07306061161169894
-WordPenalty 0.09576193325966899
-PhraseModel_5 -0.026900000000000257
+PhraseModel_4 -1.1379250444170055
+PhraseModel_2 1.0578050661336098
+LanguageModel 0.9343385461706668
+PhraseModel_0 -0.6917392152965985
+PhraseModel_1 0.4508371141128957
+PassThrough -0.4411750000000001
+Glue -0.265425
+LanguageModel_OOV -0.411025
+PhraseModel_3 -0.186390082624459
+PhraseModel_6 -0.188225
+WordPenalty 0.09781397468665984
+PhraseModel_5 0.02435
diff --git a/training/dtrain/examples/parallelized/work/weights.1.0 b/training/dtrain/examples/parallelized/work/weights.1.0
index 6a6a65c1..cdcf959e 100644
--- a/training/dtrain/examples/parallelized/work/weights.1.0
+++ b/training/dtrain/examples/parallelized/work/weights.1.0
@@ -1,11 +1,12 @@
-WordPenalty 0.20064405063930751
-LanguageModel 0.9530439901597807
-LanguageModel_OOV -0.26400000000000112
-Glue -0.38150000000000084
-PhraseModel_0 -0.22362384322085468
-PhraseModel_1 0.12253609968953538
-PhraseModel_2 0.26328345736266612
-PhraseModel_3 0.38018406503151553
-PhraseModel_4 -0.48654149460854373
-PhraseModel_6 -0.36449999999999722
-PassThrough -0.22160000000000085
+WordPenalty 0.05433023968609621
+LanguageModel 0.69947965605855011
+LanguageModel_OOV -0.2641
+PhraseModel_0 -1.4207505705360111
+PhraseModel_1 -1.563047680441811
+PhraseModel_2 -0.21050528366541305
+PhraseModel_3 -0.17764037275860439
+PhraseModel_4 -1.6583462458159566
+PhraseModel_5 0.079399999999999998
+PhraseModel_6 0.15280000000000002
+Glue -0.27220000000000005
+PassThrough -0.23670000000000002
diff --git a/training/dtrain/examples/parallelized/work/weights.1.1 b/training/dtrain/examples/parallelized/work/weights.1.1
index f56ea4a2..c1bb2cf0 100644
--- a/training/dtrain/examples/parallelized/work/weights.1.1
+++ b/training/dtrain/examples/parallelized/work/weights.1.1
@@ -1,12 +1,12 @@
-WordPenalty 0.1109188106780935
-LanguageModel 0.17269294375442074
-LanguageModel_OOV -0.13485000000000266
-Glue -0.3178000000000088
-PhraseModel_2 0.75311275661967159
-PhraseModel_1 0.38789263503268989
-PhraseModel_4 -0.58816805170415531
-PhraseModel_3 -0.38163156335114284
-PhraseModel_6 -0.27314999999999739
-PhraseModel_0 -0.45370601223484697
-PassThrough -0.16745000000000249
-PhraseModel_5 -0.026900000000000257
+WordPenalty -0.0091744709302067785
+LanguageModel 0.79433413663506514
+LanguageModel_OOV -0.43090000000000001
+PhraseModel_0 -0.56242499947237046
+PhraseModel_1 0.85362516703032698
+PhraseModel_2 1.3457900890481096
+PhraseModel_3 -0.13095079554478939
+PhraseModel_4 -0.94761908497413061
+PhraseModel_5 0.02435
+PhraseModel_6 -0.160025
+Glue -0.20487500000000003
+PassThrough -0.46105000000000007
diff --git a/training/dtrain/examples/parallelized/work/weights.1.2 b/training/dtrain/examples/parallelized/work/weights.1.2
new file mode 100644
index 00000000..c9598a04
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/weights.1.2
@@ -0,0 +1,12 @@
+WordPenalty 0.076359827280638559
+LanguageModel 1.3183380272921175
+LanguageModel_OOV -0.60902499999999993
+PhraseModel_0 -0.2248075206657828
+PhraseModel_1 0.86368802571834491
+PhraseModel_2 1.0746702462261808
+PhraseModel_3 0.18002263643876637
+PhraseModel_4 -0.84660750337519441
+PhraseModel_5 0.02435
+PhraseModel_6 0.11247499999999999
+Glue -0.49852500000000005
+PassThrough -0.63917500000000005
diff --git a/training/dtrain/examples/parallelized/work/weights.2 b/training/dtrain/examples/parallelized/work/weights.2
new file mode 100644
index 00000000..310973ec
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/weights.2
@@ -0,0 +1,12 @@
+PhraseModel_2 1.185520780812669
+PhraseModel_4 -1.0801541070647134
+LanguageModel 0.7741099486587568
+PhraseModel_0 -0.6265095873268189
+PhraseModel_1 0.6260421233840029
+PassThrough -0.5630000000000002
+Glue -0.495325
+LanguageModel_OOV -0.53285
+PhraseModel_3 -0.008805626854390465
+PhraseModel_6 -0.10977500000000001
+WordPenalty 0.1060655698428214
+PhraseModel_5 0.026699999999999998
diff --git a/training/dtrain/examples/parallelized/work/weights.2.0 b/training/dtrain/examples/parallelized/work/weights.2.0
new file mode 100644
index 00000000..3e87fed4
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/weights.2.0
@@ -0,0 +1,11 @@
+WordPenalty 0.14922358398195767
+LanguageModel 0.79685677298009394
+LanguageModel_OOV -0.66270000000000007
+PhraseModel_0 0.37998874905310187
+PhraseModel_1 0.69213063228111271
+PhraseModel_2 0.34219807728516061
+PhraseModel_3 1.1425846772648622
+PhraseModel_4 -0.55412548521619742
+PhraseModel_6 0.067599999999999993
+Glue -0.21090000000000003
+PassThrough -0.63429999999999997
diff --git a/training/dtrain/examples/parallelized/work/weights.2.1 b/training/dtrain/examples/parallelized/work/weights.2.1
new file mode 100644
index 00000000..d129dc49
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/weights.2.1
@@ -0,0 +1,12 @@
+WordPenalty 0.1593752174964457
+LanguageModel 1.5897162231676281
+LanguageModel_OOV -0.52100000000000002
+PhraseModel_0 -0.5834836741748588
+PhraseModel_1 0.29827543837280185
+PhraseModel_2 0.78493316593562568
+PhraseModel_3 0.083221832554333464
+PhraseModel_4 -0.93843312963279457
+PhraseModel_5 0.02435
+PhraseModel_6 -0.27382499999999999
+Glue -0.76607500000000006
+PassThrough -0.55115000000000003
diff --git a/training/dtrain/examples/parallelized/work/weights.2.2 b/training/dtrain/examples/parallelized/work/weights.2.2
new file mode 100644
index 00000000..bcc83b44
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/weights.2.2
@@ -0,0 +1,12 @@
+WordPenalty 0.10819361280414735
+LanguageModel 0.52389743342585859
+LanguageModel_OOV -0.41622500000000001
+PhraseModel_0 -0.86867995703334211
+PhraseModel_1 0.40783818771767943
+PhraseModel_2 1.1792706530114188
+PhraseModel_3 -0.2469805689928464
+PhraseModel_4 -1.2352895858909159
+PhraseModel_5 0.033750000000000002
+PhraseModel_6 -0.17882500000000001
+Glue -0.90862500000000002
+PassThrough -0.44637500000000013
diff --git a/training/dtrain/examples/parallelized/work/weights.3.0 b/training/dtrain/examples/parallelized/work/weights.3.0
new file mode 100644
index 00000000..e3586048
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/weights.3.0
@@ -0,0 +1,12 @@
+WordPenalty 0.32441797798172944
+LanguageModel 2.5769043236821889
+LanguageModel_OOV -0.0090000000000000011
+PhraseModel_0 -0.58972189365343919
+PhraseModel_1 0.063690869987073351
+PhraseModel_2 0.53660363110809217
+PhraseModel_3 0.12867071310286207
+PhraseModel_4 -1.9801291745988916
+PhraseModel_5 0.018000000000000002
+PhraseModel_6 -0.48600000000000004
+Glue -0.090000000000000011
+PassThrough -0.090000000000000011
diff --git a/training/dtrain/examples/parallelized/work/weights.3.1 b/training/dtrain/examples/parallelized/work/weights.3.1
new file mode 100644
index 00000000..b27687d3
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/weights.3.1
@@ -0,0 +1,12 @@
+WordPenalty 0.11138567724613679
+LanguageModel 0.95438136276453733
+LanguageModel_OOV -0.060799999999999937
+PhraseModel_0 -0.98112865741560529
+PhraseModel_1 -0.090531125075232435
+PhraseModel_2 0.79088062624556033
+PhraseModel_3 -0.57623134776057228
+PhraseModel_4 -1.4382448344095151
+PhraseModel_5 0.02435
+PhraseModel_6 -0.108125
+Glue 0.31832499999999997
+PassThrough -0.090950000000000003
diff --git a/training/dtrain/examples/parallelized/work/weights.3.2 b/training/dtrain/examples/parallelized/work/weights.3.2
new file mode 100644
index 00000000..ccb591a2
--- /dev/null
+++ b/training/dtrain/examples/parallelized/work/weights.3.2
@@ -0,0 +1,12 @@
+WordPenalty 0.13650961302423945
+LanguageModel 0.58946464694775647
+LanguageModel_OOV -0.48362499999999997
+PhraseModel_0 -0.81261645844738917
+PhraseModel_1 0.44272714074140529
+PhraseModel_2 1.1732783465445731
+PhraseModel_3 -0.18260393204552733
+PhraseModel_4 -1.2213298752899167
+PhraseModel_5 0.02435
+PhraseModel_6 -0.188225
+Glue -0.12992500000000001
+PassThrough -0.51377500000000009
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index a515db02..f2698007 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,27 +1,10 @@
-#input=./nc-wmt11.de.gz
-#refs=./nc-wmt11.en.gz
-bitext=./nc-wmt11.gz
+bitext=./nc-wmt11.gz # input bitext
output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
-select_weights=avg # output average (over epochs) weight vector
decoder_config=./cdec.ini # config for cdec
-# weights for these features will be printed on each iteration
+iterations=3 # run over input 3 times
+k=100 # use 100best lists
+N=4 # optimize (approx.) BLEU4
+learning_rate=0.1 # learning rate
+error_margin=1.0 # margin for margin perceptron
print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-# newer version of the grammar extractor use different feature names:
-#print_weights= EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV
-stop_after=10 # stop epoch after 10 inputs
-# interesting stuff
-epochs=3 # run over input 3 times
-k=100 # use 100best lists
-N=4 # optimize (approx) BLEU4
-scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=0.1 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
-gamma=0 # use SVM reg
-sample_from=kbest # use kbest lists (as opposed to forest)
-filter=uniq # only unique entries in kbest (surface form)
-pair_sampling=XYX #
-hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
-pair_threshold=0 # minimum distance in BLEU (here: > 0)
-loss_margin=0 # update if correctly ranked, but within this margin
-repeat=1 # repeat training on a kbest list 1 times
-#batch=true # batch tuning, update after accumulating over all sentences and all kbest lists
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
deleted file mode 100644
index 2460cfbb..00000000
--- a/training/dtrain/examples/standard/expected-output
+++ /dev/null
@@ -1,123 +0,0 @@
- cdec cfg './cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading ./nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
- Example feature: Shape_S00000_T00000
-T=1 I=1 D=1
-Seeding random number sequence to 2327685089
-
-dtrain
-Parameters:
- k 100
- N 4
- T 3
- batch 0
- scorer 'fixed_stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 0.1
- gamma 0
- loss margin 0
- faster perceptron 1
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'avg'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg './cdec.ini'
- input './nc-wmt11.gz'
- output '-'
- stop_after 10
-(a dot represents 10 inputs)
-Iteration #1 of 3.
- . 10
-Stopping after 10 input sentences.
-WEIGHTS
- Glue = +6.9
- WordPenalty = -46.426
- LanguageModel = +535.12
- LanguageModel_OOV = -123.5
- PhraseModel_0 = -160.73
- PhraseModel_1 = -350.13
- PhraseModel_2 = -187.81
- PhraseModel_3 = +172.04
- PhraseModel_4 = +0.90108
- PhraseModel_5 = +21.6
- PhraseModel_6 = +67.2
- PassThrough = -149.7
- ---
- 1best avg score: 0.23327 (+0.23327)
- 1best avg model score: -9084.9 (-9084.9)
- avg # pairs: 780.7
- avg # rank err: 0 (meaningless)
- avg # margin viol: 0
- k-best loss imp: 100%
- non0 feature count: 1389
- avg list sz: 91.3
- avg f count: 146.2
-(time 0.37 min, 2.2 s/S)
-
-Iteration #2 of 3.
- . 10
-WEIGHTS
- Glue = -43
- WordPenalty = -22.019
- LanguageModel = +591.53
- LanguageModel_OOV = -252.1
- PhraseModel_0 = -120.21
- PhraseModel_1 = -43.589
- PhraseModel_2 = +73.53
- PhraseModel_3 = +113.7
- PhraseModel_4 = -223.81
- PhraseModel_5 = +64
- PhraseModel_6 = +54.8
- PassThrough = -331.1
- ---
- 1best avg score: 0.29568 (+0.062413)
- 1best avg model score: -15879 (-6794.1)
- avg # pairs: 566.1
- avg # rank err: 0 (meaningless)
- avg # margin viol: 0
- k-best loss imp: 100%
- non0 feature count: 1931
- avg list sz: 91.3
- avg f count: 139.89
-(time 0.33 min, 2 s/S)
-
-Iteration #3 of 3.
- . 10
-WEIGHTS
- Glue = -44.3
- WordPenalty = -131.85
- LanguageModel = +230.91
- LanguageModel_OOV = -285.4
- PhraseModel_0 = -194.27
- PhraseModel_1 = -294.83
- PhraseModel_2 = -92.043
- PhraseModel_3 = -140.24
- PhraseModel_4 = +85.613
- PhraseModel_5 = +238.1
- PhraseModel_6 = +158.7
- PassThrough = -359.6
- ---
- 1best avg score: 0.37375 (+0.078067)
- 1best avg model score: -14519 (+1359.7)
- avg # pairs: 545.4
- avg # rank err: 0 (meaningless)
- avg # margin viol: 0
- k-best loss imp: 100%
- non0 feature count: 2218
- avg list sz: 91.3
- avg f count: 137.77
-(time 0.35 min, 2.1 s/S)
-
-Writing weights file to '-' ...
-done
-
----
-Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.37375].
-This took 1.05 min.
diff --git a/training/dtrain/examples/standard/expected-output.gz b/training/dtrain/examples/standard/expected-output.gz
new file mode 100644
index 00000000..43e6b21a
--- /dev/null
+++ b/training/dtrain/examples/standard/expected-output.gz
Binary files differ
diff --git a/training/dtrain/examples/standard/nc-wmt11.de.gz b/training/dtrain/examples/standard/nc-wmt11.de.gz
deleted file mode 100644
index 0741fd92..00000000
--- a/training/dtrain/examples/standard/nc-wmt11.de.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/standard/nc-wmt11.en.gz b/training/dtrain/examples/standard/nc-wmt11.en.gz
deleted file mode 100644
index 1c0bd401..00000000
--- a/training/dtrain/examples/standard/nc-wmt11.en.gz
+++ /dev/null
Binary files differ
diff --git a/training/dtrain/examples/toy/dtrain.ini b/training/dtrain/examples/toy/dtrain.ini
index ef956df7..378224b8 100644
--- a/training/dtrain/examples/toy/dtrain.ini
+++ b/training/dtrain/examples/toy/dtrain.ini
@@ -1,13 +1,8 @@
decoder_config=cdec.ini
-input=src
-refs=tgt
-output=-
-print_weights=logp shell_rule house_rule small_rule little_rule PassThrough PassThrough_1 PassThrough_2 PassThrough_3 PassThrough_4 PassThrough_5 PassThrough_6
+bitext=in
+output=weights
k=4
N=4
-epochs=2
-scorer=bleu
-sample_from=kbest
-filter=uniq
-pair_sampling=all
+iterations=2
learning_rate=1
+print_weights=logp shell_rule house_rule small_rule little_rule PassThrough PassThrough_1 PassThrough_2 PassThrough_3 PassThrough_4 PassThrough_5 PassThrough_6
diff --git a/training/dtrain/examples/toy/expected-output b/training/dtrain/examples/toy/expected-output
index 1da2aadd..8c758d00 100644
--- a/training/dtrain/examples/toy/expected-output
+++ b/training/dtrain/examples/toy/expected-output
@@ -1,77 +1,63 @@
-Warning: hi_lo only works with pair_sampling XYX.
- cdec cfg 'cdec.ini'
-Seeding random number sequence to 1664825829
-
dtrain
Parameters:
k 4
N 4
T 2
- scorer 'bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 1
- gamma 0
- loss margin 0
- pairs 'all'
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
- input 'src'
- refs 'tgt'
- output '-'
-(a dot represents 10 inputs)
+ error margin 0
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'in'
+ output 'weights'
+(a dot per input)
Iteration #1 of 2.
- 2
+ ... 2
WEIGHTS
logp = +0
- shell_rule = -1
- house_rule = +2
- small_rule = -2
+ shell_rule = +0
+ house_rule = +3
+ small_rule = +0
little_rule = +3
- PassThrough = -5
+ PassThrough = -15
+ PassThrough_1 = +0
+ PassThrough_2 = +0
+ PassThrough_3 = +0
+ PassThrough_4 = +0
+ PassThrough_5 = +0
+ PassThrough_6 = +0
---
- 1best avg score: 0.5 (+0.5)
- 1best avg model score: 2.5 (+2.5)
- avg # pairs: 4
- avg # rank err: 1.5
- avg # margin viol: 0
- non0 feature count: 6
+ 1best avg score: 0.40937 (+0.40937)
+ 1best avg model score: 3
+ avg # pairs: 2.5
+ non-0 feature count: 4
avg list sz: 4
avg f count: 2.875
(time 0 min, 0 s/S)
Iteration #2 of 2.
- 2
+ ... 2
WEIGHTS
logp = +0
- shell_rule = -1
- house_rule = +2
- small_rule = -2
+ shell_rule = +0
+ house_rule = +3
+ small_rule = +0
little_rule = +3
- PassThrough = -5
+ PassThrough = -15
+ PassThrough_1 = +0
+ PassThrough_2 = +0
+ PassThrough_3 = +0
+ PassThrough_4 = +0
+ PassThrough_5 = +0
+ PassThrough_6 = +0
---
- 1best avg score: 1 (+0.5)
- 1best avg model score: 5 (+2.5)
- avg # pairs: 5
- avg # rank err: 0
- avg # margin viol: 0
- non0 feature count: 6
+ 1best avg score: 0.81873 (+0.40937)
+ 1best avg model score: 6
+ avg # pairs: 0
+ non-0 feature count: 4
avg list sz: 4
avg f count: 3
(time 0 min, 0 s/S)
-Writing weights file to '-' ...
-house_rule 2
-little_rule 3
-Glue -4
-PassThrough -5
-small_rule -2
-shell_rule -1
-done
-
---
-Best iteration: 2 [SCORE 'bleu'=1].
+Best iteration: 2 [GOLD = 0.81873].
This took 0 min.
diff --git a/training/dtrain/examples/toy/in b/training/dtrain/examples/toy/in
new file mode 100644
index 00000000..5d70795d
--- /dev/null
+++ b/training/dtrain/examples/toy/in
@@ -0,0 +1,2 @@
+ich sah ein kleines haus ||| i saw a little house
+ich fand ein kleines haus ||| i found a little house
diff --git a/training/dtrain/examples/toy/src b/training/dtrain/examples/toy/src
deleted file mode 100644
index 87e39ef2..00000000
--- a/training/dtrain/examples/toy/src
+++ /dev/null
@@ -1,2 +0,0 @@
-ich sah ein kleines haus
-ich fand ein kleines haus
diff --git a/training/dtrain/examples/toy/tgt b/training/dtrain/examples/toy/tgt
deleted file mode 100644
index 174926b3..00000000
--- a/training/dtrain/examples/toy/tgt
+++ /dev/null
@@ -1,2 +0,0 @@
-i saw a little house
-i found a little house
diff --git a/training/dtrain/examples/toy/weights b/training/dtrain/examples/toy/weights
new file mode 100644
index 00000000..f6f32772
--- /dev/null
+++ b/training/dtrain/examples/toy/weights
@@ -0,0 +1,4 @@
+house_rule 3
+little_rule 3
+Glue -12
+PassThrough -15