Diffstat (limited to 'training/dtrain/examples/parallelized/work')
-rw-r--r--  training/dtrain/examples/parallelized/work/out.0.0  74
-rw-r--r--  training/dtrain/examples/parallelized/work/out.0.1  74
-rw-r--r--  training/dtrain/examples/parallelized/work/out.0.2  74
-rw-r--r--  training/dtrain/examples/parallelized/work/out.1.0  72
-rw-r--r--  training/dtrain/examples/parallelized/work/out.1.1  74
-rw-r--r--  training/dtrain/examples/parallelized/work/out.1.2  74
-rw-r--r--  training/dtrain/examples/parallelized/work/out.2.0  72
-rw-r--r--  training/dtrain/examples/parallelized/work/out.2.1  74
-rw-r--r--  training/dtrain/examples/parallelized/work/out.2.2  74
-rw-r--r--  training/dtrain/examples/parallelized/work/out.3.0  74
-rw-r--r--  training/dtrain/examples/parallelized/work/out.3.1  72
-rw-r--r--  training/dtrain/examples/parallelized/work/out.3.2  72
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.0.0.in  4
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.1.0.in  4
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.2.0.in  4
-rw-r--r--  training/dtrain/examples/parallelized/work/shard.3.0.in  2
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.0  24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.0.0  23
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.0.1  24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.0.2  24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.1  24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.1.0  23
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.1.1  24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.1.2  24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.2  24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.2.0  22
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.2.1  24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.2.2  24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.3.0  24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.3.1  24
-rw-r--r--  training/dtrain/examples/parallelized/work/weights.3.2  24
31 files changed, 493 insertions, 757 deletions
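The diff below updates a 4-shard parallelized dtrain example: the 10-segment input is split into work/shard.<shard>.0.in, each shard logs to work/out.<shard>.<iter> and writes work/weights.<shard>.<iter>, and work/weights.<iter> holds the across-shard mix. The updated shard files are a contiguous split of ceil(10/4) = 3 segments per shard, the last shard taking the 1-segment remainder. A minimal sketch of such a split (illustrative helper, not the actual parallelize script):

import math

def write_shards(input_path, num_shards=4, work_dir="work"):
    # Contiguous split: ceil(10 / 4) = 3 segments per shard here.
    with open(input_path) as f:
        lines = f.readlines()
    per_shard = math.ceil(len(lines) / num_shards)
    for s in range(num_shards):
        chunk = lines[s * per_shard:(s + 1) * per_shard]
        with open(f"{work_dir}/shard.{s}.0.in", "w") as out:
            out.writelines(chunk)  # the last shard gets the remainder (1 line)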
diff --git a/training/dtrain/examples/parallelized/work/out.0.0 b/training/dtrain/examples/parallelized/work/out.0.0
index 9154c906..77749404 100644
--- a/training/dtrain/examples/parallelized/work/out.0.0
+++ b/training/dtrain/examples/parallelized/work/out.0.0
@@ -1,65 +1,43 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 4087834873
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.0.0.in'
output 'work/weights.0.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 3
+ .... 3
WEIGHTS
- Glue = +0.257
- WordPenalty = +0.026926
- LanguageModel = +0.67342
- LanguageModel_OOV = -0.046
- PhraseModel_0 = +0.25329
- PhraseModel_1 = +0.20036
- PhraseModel_2 = +0.00060731
- PhraseModel_3 = +0.65578
- PhraseModel_4 = +0.47916
- PhraseModel_5 = +0.004
- PhraseModel_6 = +0.1829
- PassThrough = -0.082
+ Glue = +0.3404
+ WordPenalty = -0.017632
+ LanguageModel = +0.72958
+ LanguageModel_OOV = -0.235
+ PhraseModel_0 = -0.43721
+ PhraseModel_1 = +1.01
+ PhraseModel_2 = +1.3525
+ PhraseModel_3 = -0.25541
+ PhraseModel_4 = -0.78115
+ PhraseModel_5 = +0
+ PhraseModel_6 = -0.3681
+ PassThrough = -0.3304
---
- 1best avg score: 0.04518 (+0.04518)
- 1best avg model score: 32.803 (+32.803)
- avg # pairs: 1266.3
- avg # rank err: 857
- avg # margin viol: 386.67
- k-best loss imp: 100%
- non0 feature count: 12
+ 1best avg score: 0.19474 (+0.19474)
+ 1best avg model score: 0.52232
+ avg # pairs: 2513
+ non-0 feature count: 11
avg list sz: 100
- avg f count: 10.853
-(time 0.47 min, 9.3 s/S)
-
-Writing weights file to 'work/weights.0.0' ...
-done
+ avg f count: 11.42
+(time 0.32 min, 6 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.04518].
-This took 0.46667 min.
+Best iteration: 1 [GOLD = 0.19474].
+This took 0.31667 min.
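The parameters logged above (k 100, learning rate 0.0001, error margin 1) together with the 'avg # pairs' statistic describe a pairwise ranking update over pairs sampled from each 100-best list. A minimal sketch of that margin-perceptron step, assuming plain dicts as sparse feature vectors (an illustration of the reported settings, not dtrain's actual C++ code):

def dot(w, f):
    return sum(w.get(name, 0.0) * v for name, v in f.items())

def update(w, pairs, eta=0.0001, margin=1.0):
    # pairs: (feats_better, feats_worse) dicts, where 'better' outranks
    # 'worse' under the gold metric; update only on a margin violation.
    for f_plus, f_minus in pairs:
        if dot(w, f_plus) - dot(w, f_minus) < margin:
            for name, v in f_plus.items():
                w[name] = w.get(name, 0.0) + eta * v
            for name, v in f_minus.items():
                w[name] = w.get(name, 0.0) - eta * v
    return w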
diff --git a/training/dtrain/examples/parallelized/work/out.0.1 b/training/dtrain/examples/parallelized/work/out.0.1
index 0dbc7bd3..d0dee623 100644
--- a/training/dtrain/examples/parallelized/work/out.0.1
+++ b/training/dtrain/examples/parallelized/work/out.0.1
@@ -1,66 +1,44 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 2283043509
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.0.0.in'
output 'work/weights.0.1'
weights in 'work/weights.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 3
+ .... 3
WEIGHTS
- Glue = -0.17905
- WordPenalty = +0.062126
- LanguageModel = +0.66825
- LanguageModel_OOV = -0.15248
- PhraseModel_0 = -0.55811
- PhraseModel_1 = +0.12741
- PhraseModel_2 = +0.60388
- PhraseModel_3 = -0.44464
- PhraseModel_4 = -0.63137
- PhraseModel_5 = -0.0084
- PhraseModel_6 = -0.20165
- PassThrough = -0.23468
+ Glue = -0.40908
+ WordPenalty = +0.12967
+ LanguageModel = +0.39892
+ LanguageModel_OOV = -0.6314
+ PhraseModel_0 = -0.63992
+ PhraseModel_1 = +0.74198
+ PhraseModel_2 = +1.3096
+ PhraseModel_3 = -0.1216
+ PhraseModel_4 = -1.2274
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.21093
+ PassThrough = -0.66155
---
- 1best avg score: 0.14066 (+0.14066)
- 1best avg model score: -37.614 (-37.614)
- avg # pairs: 1244.7
- avg # rank err: 728
- avg # margin viol: 516.67
- k-best loss imp: 100%
- non0 feature count: 12
+ 1best avg score: 0.15735 (+0.15735)
+ 1best avg model score: 46.831
+ avg # pairs: 2132.3
+ non-0 feature count: 12
avg list sz: 100
- avg f count: 11.507
-(time 0.45 min, 9 s/S)
-
-Writing weights file to 'work/weights.0.1' ...
-done
+ avg f count: 10.64
+(time 0.38 min, 7 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.14066].
-This took 0.45 min.
+Best iteration: 1 [GOLD = 0.15735].
+This took 0.38333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.0.2 b/training/dtrain/examples/parallelized/work/out.0.2
index fcecc7e1..9c4b110b 100644
--- a/training/dtrain/examples/parallelized/work/out.0.2
+++ b/training/dtrain/examples/parallelized/work/out.0.2
@@ -1,66 +1,44 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 3693132895
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.0.0.in'
output 'work/weights.0.2'
weights in 'work/weights.1'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 3
+ .... 3
WEIGHTS
- Glue = -0.019275
- WordPenalty = +0.022192
- LanguageModel = +0.40688
- LanguageModel_OOV = -0.36397
- PhraseModel_0 = -0.36273
- PhraseModel_1 = +0.56432
- PhraseModel_2 = +0.85638
- PhraseModel_3 = -0.20222
- PhraseModel_4 = -0.48295
- PhraseModel_5 = +0.03145
- PhraseModel_6 = -0.26092
- PassThrough = -0.38122
+ Glue = -0.44422
+ WordPenalty = +0.1032
+ LanguageModel = +0.66474
+ LanguageModel_OOV = -0.62252
+ PhraseModel_0 = -0.59993
+ PhraseModel_1 = +0.78992
+ PhraseModel_2 = +1.3149
+ PhraseModel_3 = +0.21434
+ PhraseModel_4 = -1.0174
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.18452
+ PassThrough = -0.65268
---
- 1best avg score: 0.18982 (+0.18982)
- 1best avg model score: 1.7096 (+1.7096)
- avg # pairs: 1524.3
- avg # rank err: 813.33
- avg # margin viol: 702.67
- k-best loss imp: 100%
- non0 feature count: 12
+ 1best avg score: 0.24722 (+0.24722)
+ 1best avg model score: 61.971
+ avg # pairs: 2017.7
+ non-0 feature count: 12
avg list sz: 100
- avg f count: 11.32
-(time 0.53 min, 11 s/S)
-
-Writing weights file to 'work/weights.0.2' ...
-done
+ avg f count: 10.42
+(time 0.3 min, 6 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.18982].
-This took 0.53333 min.
+Best iteration: 1 [GOLD = 0.24722].
+This took 0.3 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.0 b/training/dtrain/examples/parallelized/work/out.1.0
index 595dfc94..3dc4dca6 100644
--- a/training/dtrain/examples/parallelized/work/out.1.0
+++ b/training/dtrain/examples/parallelized/work/out.1.0
@@ -1,65 +1,43 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 859043351
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.1.0.in'
output 'work/weights.1.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 3
+ .... 3
WEIGHTS
- Glue = -0.3229
- WordPenalty = +0.27969
- LanguageModel = +1.3645
- LanguageModel_OOV = -0.0443
- PhraseModel_0 = -0.19049
- PhraseModel_1 = -0.077698
- PhraseModel_2 = +0.058898
- PhraseModel_3 = +0.017251
- PhraseModel_4 = -1.5474
- PhraseModel_5 = +0
- PhraseModel_6 = -0.1818
- PassThrough = -0.193
+ Glue = -0.2722
+ WordPenalty = +0.05433
+ LanguageModel = +0.69948
+ LanguageModel_OOV = -0.2641
+ PhraseModel_0 = -1.4208
+ PhraseModel_1 = -1.563
+ PhraseModel_2 = -0.21051
+ PhraseModel_3 = -0.17764
+ PhraseModel_4 = -1.6583
+ PhraseModel_5 = +0.0794
+ PhraseModel_6 = +0.1528
+ PassThrough = -0.2367
---
- 1best avg score: 0.070229 (+0.070229)
- 1best avg model score: -44.01 (-44.01)
- avg # pairs: 1294
- avg # rank err: 878.67
- avg # margin viol: 350.67
- k-best loss imp: 100%
- non0 feature count: 11
+ 1best avg score: 0.071329 (+0.071329)
+ 1best avg model score: -41.362
+ avg # pairs: 1862.3
+ non-0 feature count: 12
avg list sz: 100
- avg f count: 11.487
-(time 0.28 min, 5.7 s/S)
-
-Writing weights file to 'work/weights.1.0' ...
-done
+ avg f count: 11.847
+(time 0.28 min, 5 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.070229].
+Best iteration: 1 [GOLD = 0.071329].
This took 0.28333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.1 b/training/dtrain/examples/parallelized/work/out.1.1
index 9346fc82..79ac35dc 100644
--- a/training/dtrain/examples/parallelized/work/out.1.1
+++ b/training/dtrain/examples/parallelized/work/out.1.1
@@ -1,66 +1,44 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 3557309480
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.1.0.in'
output 'work/weights.1.1'
weights in 'work/weights.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 3
+ .... 3
WEIGHTS
- Glue = -0.26425
- WordPenalty = +0.047881
- LanguageModel = +0.78496
- LanguageModel_OOV = -0.49307
- PhraseModel_0 = -0.58703
- PhraseModel_1 = -0.33425
- PhraseModel_2 = +0.20834
- PhraseModel_3 = -0.043346
- PhraseModel_4 = -0.60761
- PhraseModel_5 = +0.123
- PhraseModel_6 = -0.05415
- PassThrough = -0.42167
+ Glue = -0.20488
+ WordPenalty = -0.0091745
+ LanguageModel = +0.79433
+ LanguageModel_OOV = -0.4309
+ PhraseModel_0 = -0.56242
+ PhraseModel_1 = +0.85363
+ PhraseModel_2 = +1.3458
+ PhraseModel_3 = -0.13095
+ PhraseModel_4 = -0.94762
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.16003
+ PassThrough = -0.46105
---
- 1best avg score: 0.085952 (+0.085952)
- 1best avg model score: -45.175 (-45.175)
- avg # pairs: 1180.7
- avg # rank err: 668.33
- avg # margin viol: 512.33
- k-best loss imp: 100%
- non0 feature count: 12
+ 1best avg score: 0.13017 (+0.13017)
+ 1best avg model score: 14.53
+ avg # pairs: 1968
+ non-0 feature count: 12
avg list sz: 100
- avg f count: 12
-(time 0.27 min, 5.3 s/S)
-
-Writing weights file to 'work/weights.1.1' ...
-done
+ avg f count: 11
+(time 0.33 min, 6 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.085952].
-This took 0.26667 min.
+Best iteration: 1 [GOLD = 0.13017].
+This took 0.33333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.2 b/training/dtrain/examples/parallelized/work/out.1.2
index 08f07a75..8c4f8b03 100644
--- a/training/dtrain/examples/parallelized/work/out.1.2
+++ b/training/dtrain/examples/parallelized/work/out.1.2
@@ -1,66 +1,44 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 56743915
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.1.0.in'
output 'work/weights.1.2'
weights in 'work/weights.1'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 3
+ .... 3
WEIGHTS
- Glue = -0.23608
- WordPenalty = +0.10931
- LanguageModel = +0.81339
- LanguageModel_OOV = -0.33238
- PhraseModel_0 = -0.53685
- PhraseModel_1 = -0.049658
- PhraseModel_2 = +0.40277
- PhraseModel_3 = +0.14601
- PhraseModel_4 = -0.72851
- PhraseModel_5 = +0.03475
- PhraseModel_6 = -0.27192
- PassThrough = -0.34763
+ Glue = -0.49853
+ WordPenalty = +0.07636
+ LanguageModel = +1.3183
+ LanguageModel_OOV = -0.60902
+ PhraseModel_0 = -0.22481
+ PhraseModel_1 = +0.86369
+ PhraseModel_2 = +1.0747
+ PhraseModel_3 = +0.18002
+ PhraseModel_4 = -0.84661
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = +0.11247
+ PassThrough = -0.63918
---
- 1best avg score: 0.10073 (+0.10073)
- 1best avg model score: -38.422 (-38.422)
- avg # pairs: 1505.3
- avg # rank err: 777
- avg # margin viol: 691.67
- k-best loss imp: 100%
- non0 feature count: 12
+ 1best avg score: 0.15478 (+0.15478)
+ 1best avg model score: -7.2154
+ avg # pairs: 1776
+ non-0 feature count: 12
avg list sz: 100
- avg f count: 12
-(time 0.35 min, 7 s/S)
-
-Writing weights file to 'work/weights.1.2' ...
-done
+ avg f count: 11.327
+(time 0.27 min, 5 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.10073].
-This took 0.35 min.
+Best iteration: 1 [GOLD = 0.15478].
+This took 0.26667 min.
diff --git a/training/dtrain/examples/parallelized/work/out.2.0 b/training/dtrain/examples/parallelized/work/out.2.0
index 25ef6d4e..07c85963 100644
--- a/training/dtrain/examples/parallelized/work/out.2.0
+++ b/training/dtrain/examples/parallelized/work/out.2.0
@@ -1,65 +1,43 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 2662215673
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.2.0.in'
output 'work/weights.2.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 3
+ .... 3
WEIGHTS
- Glue = -0.1259
- WordPenalty = +0.048294
- LanguageModel = +0.36254
- LanguageModel_OOV = -0.1228
- PhraseModel_0 = +0.26357
- PhraseModel_1 = +0.24793
- PhraseModel_2 = +0.0063763
- PhraseModel_3 = -0.18966
- PhraseModel_4 = -0.226
+ Glue = -0.2109
+ WordPenalty = +0.14922
+ LanguageModel = +0.79686
+ LanguageModel_OOV = -0.6627
+ PhraseModel_0 = +0.37999
+ PhraseModel_1 = +0.69213
+ PhraseModel_2 = +0.3422
+ PhraseModel_3 = +1.1426
+ PhraseModel_4 = -0.55413
PhraseModel_5 = +0
- PhraseModel_6 = +0.0743
- PassThrough = -0.1335
+ PhraseModel_6 = +0.0676
+ PassThrough = -0.6343
---
- 1best avg score: 0.072836 (+0.072836)
- 1best avg model score: -0.56296 (-0.56296)
- avg # pairs: 1094.7
- avg # rank err: 658
- avg # margin viol: 436.67
- k-best loss imp: 100%
- non0 feature count: 11
+ 1best avg score: 0.072374 (+0.072374)
+ 1best avg model score: -27.384
+ avg # pairs: 2582
+ non-0 feature count: 11
avg list sz: 100
- avg f count: 10.813
-(time 0.13 min, 2.7 s/S)
-
-Writing weights file to 'work/weights.2.0' ...
-done
+ avg f count: 11.54
+(time 0.32 min, 6 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.072836].
-This took 0.13333 min.
+Best iteration: 1 [GOLD = 0.072374].
+This took 0.31667 min.
diff --git a/training/dtrain/examples/parallelized/work/out.2.1 b/training/dtrain/examples/parallelized/work/out.2.1
index 8e4efde9..c54bb1b1 100644
--- a/training/dtrain/examples/parallelized/work/out.2.1
+++ b/training/dtrain/examples/parallelized/work/out.2.1
@@ -1,66 +1,44 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 3092904479
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.2.0.in'
output 'work/weights.2.1'
weights in 'work/weights.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 3
+ .... 3
WEIGHTS
- Glue = -0.10385
- WordPenalty = +0.038717
- LanguageModel = +0.49413
- LanguageModel_OOV = -0.24887
- PhraseModel_0 = -0.32102
- PhraseModel_1 = +0.34413
- PhraseModel_2 = +0.62366
- PhraseModel_3 = -0.49337
- PhraseModel_4 = -0.77005
- PhraseModel_5 = +0.007
- PhraseModel_6 = -0.05055
- PassThrough = -0.23928
+ Glue = -0.76608
+ WordPenalty = +0.15938
+ LanguageModel = +1.5897
+ LanguageModel_OOV = -0.521
+ PhraseModel_0 = -0.58348
+ PhraseModel_1 = +0.29828
+ PhraseModel_2 = +0.78493
+ PhraseModel_3 = +0.083222
+ PhraseModel_4 = -0.93843
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.27382
+ PassThrough = -0.55115
---
- 1best avg score: 0.10245 (+0.10245)
- 1best avg model score: -20.384 (-20.384)
- avg # pairs: 1741.7
- avg # rank err: 953.67
- avg # margin viol: 585.33
- k-best loss imp: 100%
- non0 feature count: 12
+ 1best avg score: 0.12881 (+0.12881)
+ 1best avg model score: -9.6731
+ avg # pairs: 2020.3
+ non-0 feature count: 12
avg list sz: 100
- avg f count: 11.977
-(time 0.12 min, 2.3 s/S)
-
-Writing weights file to 'work/weights.2.1' ...
-done
+ avg f count: 12
+(time 0.32 min, 6 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.10245].
-This took 0.11667 min.
+Best iteration: 1 [GOLD = 0.12881].
+This took 0.31667 min.
diff --git a/training/dtrain/examples/parallelized/work/out.2.2 b/training/dtrain/examples/parallelized/work/out.2.2
index e0ca2110..f5d6229f 100644
--- a/training/dtrain/examples/parallelized/work/out.2.2
+++ b/training/dtrain/examples/parallelized/work/out.2.2
@@ -1,66 +1,44 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 2803362953
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.2.0.in'
output 'work/weights.2.2'
weights in 'work/weights.1'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 3
+ .... 3
WEIGHTS
- Glue = -0.32907
- WordPenalty = +0.049596
- LanguageModel = +0.33496
- LanguageModel_OOV = -0.44357
- PhraseModel_0 = -0.3068
- PhraseModel_1 = +0.59376
- PhraseModel_2 = +0.86416
- PhraseModel_3 = -0.21072
- PhraseModel_4 = -0.65734
- PhraseModel_5 = +0.03475
- PhraseModel_6 = -0.10653
- PassThrough = -0.46082
+ Glue = -0.90863
+ WordPenalty = +0.10819
+ LanguageModel = +0.5239
+ LanguageModel_OOV = -0.41623
+ PhraseModel_0 = -0.86868
+ PhraseModel_1 = +0.40784
+ PhraseModel_2 = +1.1793
+ PhraseModel_3 = -0.24698
+ PhraseModel_4 = -1.2353
+ PhraseModel_5 = +0.03375
+ PhraseModel_6 = -0.17883
+ PassThrough = -0.44638
---
- 1best avg score: 0.25055 (+0.25055)
- 1best avg model score: -1.4459 (-1.4459)
- avg # pairs: 1689
- avg # rank err: 755.67
- avg # margin viol: 829.33
- k-best loss imp: 100%
- non0 feature count: 12
+ 1best avg score: 0.12788 (+0.12788)
+ 1best avg model score: 41.302
+ avg # pairs: 2246.3
+ non-0 feature count: 12
avg list sz: 100
- avg f count: 10.53
-(time 0.13 min, 2.7 s/S)
-
-Writing weights file to 'work/weights.2.2' ...
-done
+ avg f count: 10.98
+(time 0.35 min, 7 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.25055].
-This took 0.13333 min.
+Best iteration: 1 [GOLD = 0.12788].
+This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.3.0 b/training/dtrain/examples/parallelized/work/out.3.0
index 3c074f04..fa499523 100644
--- a/training/dtrain/examples/parallelized/work/out.3.0
+++ b/training/dtrain/examples/parallelized/work/out.3.0
@@ -1,65 +1,43 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 316107185
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.3.0.in'
output 'work/weights.3.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 1
+ .. 1
WEIGHTS
- Glue = +0.046
- WordPenalty = +0.17328
- LanguageModel = +1.1667
- LanguageModel_OOV = +0.066
- PhraseModel_0 = -1.1694
- PhraseModel_1 = -0.9883
- PhraseModel_2 = +0.036205
- PhraseModel_3 = -0.77387
- PhraseModel_4 = -1.5019
- PhraseModel_5 = +0.024
- PhraseModel_6 = -0.514
- PassThrough = +0.031
+ Glue = -0.09
+ WordPenalty = +0.32442
+ LanguageModel = +2.5769
+ LanguageModel_OOV = -0.009
+ PhraseModel_0 = -0.58972
+ PhraseModel_1 = +0.063691
+ PhraseModel_2 = +0.5366
+ PhraseModel_3 = +0.12867
+ PhraseModel_4 = -1.9801
+ PhraseModel_5 = +0.018
+ PhraseModel_6 = -0.486
+ PassThrough = -0.09
---
- 1best avg score: 0.032916 (+0.032916)
- 1best avg model score: 0 (+0)
- avg # pairs: 900
- avg # rank err: 900
- avg # margin viol: 0
- k-best loss imp: 100%
- non0 feature count: 12
+ 1best avg score: 0.034204 (+0.034204)
+ 1best avg model score: 0
+ avg # pairs: 1700
+ non-0 feature count: 12
avg list sz: 100
- avg f count: 11.72
-(time 0.23 min, 14 s/S)
-
-Writing weights file to 'work/weights.3.0' ...
-done
+ avg f count: 10.8
+(time 0.1 min, 6 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.032916].
-This took 0.23333 min.
+Best iteration: 1 [GOLD = 0.034204].
+This took 0.1 min.
diff --git a/training/dtrain/examples/parallelized/work/out.3.1 b/training/dtrain/examples/parallelized/work/out.3.1
index 241d3455..c4b3aa3c 100644
--- a/training/dtrain/examples/parallelized/work/out.3.1
+++ b/training/dtrain/examples/parallelized/work/out.3.1
@@ -1,66 +1,44 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 353677750
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.3.0.in'
output 'work/weights.3.1'
weights in 'work/weights.0'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 1
+ .. 1
WEIGHTS
- Glue = -0.08475
- WordPenalty = +0.11151
- LanguageModel = +1.0635
- LanguageModel_OOV = -0.11468
- PhraseModel_0 = -0.062922
- PhraseModel_1 = +0.0035552
- PhraseModel_2 = +0.039692
- PhraseModel_3 = +0.080265
- PhraseModel_4 = -0.57787
- PhraseModel_5 = +0.0174
- PhraseModel_6 = -0.17095
- PassThrough = -0.18248
+ Glue = +0.31832
+ WordPenalty = +0.11139
+ LanguageModel = +0.95438
+ LanguageModel_OOV = -0.0608
+ PhraseModel_0 = -0.98113
+ PhraseModel_1 = -0.090531
+ PhraseModel_2 = +0.79088
+ PhraseModel_3 = -0.57623
+ PhraseModel_4 = -1.4382
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.10812
+ PassThrough = -0.09095
---
- 1best avg score: 0.16117 (+0.16117)
- 1best avg model score: -67.89 (-67.89)
- avg # pairs: 1411
- avg # rank err: 460
- avg # margin viol: 951
- k-best loss imp: 100%
- non0 feature count: 12
+ 1best avg score: 0.084989 (+0.084989)
+ 1best avg model score: -52.323
+ avg # pairs: 2487
+ non-0 feature count: 12
avg list sz: 100
avg f count: 12
-(time 0.22 min, 13 s/S)
-
-Writing weights file to 'work/weights.3.1' ...
-done
+(time 0.1 min, 6 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.16117].
-This took 0.21667 min.
+Best iteration: 1 [GOLD = 0.084989].
+This took 0.1 min.
diff --git a/training/dtrain/examples/parallelized/work/out.3.2 b/training/dtrain/examples/parallelized/work/out.3.2
index b995daf5..eb27dac2 100644
--- a/training/dtrain/examples/parallelized/work/out.3.2
+++ b/training/dtrain/examples/parallelized/work/out.3.2
@@ -1,66 +1,44 @@
- cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 3001145976
-
dtrain
Parameters:
k 100
N 4
T 1
- batch 0
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg 'cdec.ini'
- input ''
+ error margin 1
+ l1 reg 0
+ decoder conf 'cdec.ini'
+ input 'work/shard.3.0.in'
output 'work/weights.3.2'
weights in 'work/weights.1'
-(a dot represents 10 inputs)
+(a dot per input)
Iteration #1 of 1.
- 1
+ .. 1
WEIGHTS
- Glue = -0.13247
- WordPenalty = +0.053592
- LanguageModel = +0.72105
- LanguageModel_OOV = -0.30827
- PhraseModel_0 = -0.37053
- PhraseModel_1 = +0.17551
- PhraseModel_2 = +0.5
- PhraseModel_3 = -0.1459
- PhraseModel_4 = -0.59563
- PhraseModel_5 = +0.03475
- PhraseModel_6 = -0.11143
- PassThrough = -0.32553
+ Glue = -0.12993
+ WordPenalty = +0.13651
+ LanguageModel = +0.58946
+ LanguageModel_OOV = -0.48362
+ PhraseModel_0 = -0.81262
+ PhraseModel_1 = +0.44273
+ PhraseModel_2 = +1.1733
+ PhraseModel_3 = -0.1826
+ PhraseModel_4 = -1.2213
+ PhraseModel_5 = +0.02435
+ PhraseModel_6 = -0.18823
+ PassThrough = -0.51378
---
- 1best avg score: 0.12501 (+0.12501)
- 1best avg model score: -62.128 (-62.128)
- avg # pairs: 979
- avg # rank err: 539
- avg # margin viol: 440
- k-best loss imp: 100%
- non0 feature count: 12
+ 1best avg score: 0.12674 (+0.12674)
+ 1best avg model score: -7.2878
+ avg # pairs: 1769
+ non-0 feature count: 12
avg list sz: 100
avg f count: 12
-(time 0.22 min, 13 s/S)
-
-Writing weights file to 'work/weights.3.2' ...
-done
+(time 0.1 min, 6 s/S)
---
-Best iteration: 1 [SCORE 'stupid_bleu'=0.12501].
-This took 0.21667 min.
+Best iteration: 1 [GOLD = 0.12674].
+This took 0.1 min.
diff --git a/training/dtrain/examples/parallelized/work/shard.0.0.in b/training/dtrain/examples/parallelized/work/shard.0.0.in
index d1b48321..a0ef6f54 100644
--- a/training/dtrain/examples/parallelized/work/shard.0.0.in
+++ b/training/dtrain/examples/parallelized/work/shard.0.0.in
@@ -1,3 +1,3 @@
-<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg> ||| the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths .
-<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg> ||| mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear .
+<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg> ||| europe 's divided racial house
+<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg> ||| a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge .
<seg grammar="grammar/grammar.out.2.gz" id="2">der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .</seg> ||| the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them .
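Each shard line pairs an SGML-annotated source segment, carrying its per-sentence grammar file and segment id, with the reference translation after ' ||| '. A small parser sketch for this format; the regex and field names are assumptions read off the lines above, not dtrain's actual input reader:

import re

SEG = re.compile(r'<seg grammar="([^"]+)" id="(\d+)">(.*)</seg>')

def parse_shard_line(line):
    src, ref = line.split(" ||| ", 1)
    grammar, seg_id, source = SEG.match(src.strip()).groups()
    return {"grammar": grammar, "id": int(seg_id),
            "source": source, "reference": ref.strip()}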
diff --git a/training/dtrain/examples/parallelized/work/shard.1.0.in b/training/dtrain/examples/parallelized/work/shard.1.0.in
index a63f05bd..05f0273b 100644
--- a/training/dtrain/examples/parallelized/work/shard.1.0.in
+++ b/training/dtrain/examples/parallelized/work/shard.1.0.in
@@ -1,3 +1,3 @@
-<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg> ||| an aging population at home and ever more open borders imply increasing racial fragmentation in european countries .
-<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg> ||| this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us .
<seg grammar="grammar/grammar.out.3.gz" id="3">während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .</seg> ||| while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon .
+<seg grammar="grammar/grammar.out.4.gz" id="4">eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .</seg> ||| an aging population at home and ever more open borders imply increasing racial fragmentation in european countries .
+<seg grammar="grammar/grammar.out.5.gz" id="5">die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .</seg> ||| mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear .
diff --git a/training/dtrain/examples/parallelized/work/shard.2.0.in b/training/dtrain/examples/parallelized/work/shard.2.0.in
index fe542b40..0528d357 100644
--- a/training/dtrain/examples/parallelized/work/shard.2.0.in
+++ b/training/dtrain/examples/parallelized/work/shard.2.0.in
@@ -1,3 +1,3 @@
-<seg grammar="grammar/grammar.out.1.gz" id="1">ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .</seg> ||| a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge .
-<seg grammar="grammar/grammar.out.0.gz" id="0">europas nach rassen geteiltes haus</seg> ||| europe 's divided racial house
<seg grammar="grammar/grammar.out.6.gz" id="6">das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .</seg> ||| it will not , as america 's racial history clearly shows .
+<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg> ||| race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes .
+<seg grammar="grammar/grammar.out.8.gz" id="8">der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .</seg> ||| the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths .
diff --git a/training/dtrain/examples/parallelized/work/shard.3.0.in b/training/dtrain/examples/parallelized/work/shard.3.0.in
index 4a8fa5b1..f7cbb3e3 100644
--- a/training/dtrain/examples/parallelized/work/shard.3.0.in
+++ b/training/dtrain/examples/parallelized/work/shard.3.0.in
@@ -1 +1 @@
-<seg grammar="grammar/grammar.out.7.gz" id="7">die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .</seg> ||| race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes .
+<seg grammar="grammar/grammar.out.9.gz" id="9">genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .</seg> ||| this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us .
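The per-iteration files work/weights.0, weights.1 and weights.2 below are the across-shard averages of work/weights.<shard>.<iter>, with a feature missing from a shard counted as zero: e.g. PhraseModel_5 in weights.0 is (0 + 0.0794 + 0 + 0.018) / 4 = 0.02435. A sketch of that mixing step (hypothetical helper, not the actual averaging code; the file format is one 'Feature value' pair per line):

from collections import defaultdict

def average_weights(paths):
    sums = defaultdict(float)
    for path in paths:
        with open(path) as f:
            for line in f:
                name, value = line.split()
                sums[name] += float(value)
    # Dividing by the shard count treats absent features as zero.
    return {name: total / len(paths) for name, total in sums.items()}

# e.g. average_weights([f"work/weights.{s}.0" for s in range(4)])
# reproduces work/weights.0 (up to feature ordering).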
diff --git a/training/dtrain/examples/parallelized/work/weights.0 b/training/dtrain/examples/parallelized/work/weights.0
index c560fdbd..816269cd 100644
--- a/training/dtrain/examples/parallelized/work/weights.0
+++ b/training/dtrain/examples/parallelized/work/weights.0
@@ -1,12 +1,12 @@
-PhraseModel_4 -0.6990170657294328
-LanguageModel 0.891784887346263
-PhraseModel_0 -0.2107507586515428
-PhraseModel_1 -0.15442709655871997
-PhraseModel_3 -0.07262514338204715
-PhraseModel_6 -0.10965000000000148
-Glue -0.03644999999999783
-WordPenalty 0.13204723722268177
-PassThrough -0.09437500000000089
-LanguageModel_OOV -0.036775000000000564
-PhraseModel_2 0.025521702385571707
-PhraseModel_5 0.006999999999999977
+LanguageModel 1.200704259340465
+PhraseModel_4 -1.2434381298299035
+PhraseModel_1 0.050697726409824076
+PhraseModel_0 -0.516923312932941
+PhraseModel_2 0.5051987092783867
+PhraseModel_3 0.20955092377784057
+PassThrough -0.32285
+LanguageModel_OOV -0.29269999999999996
+PhraseModel_6 -0.158425
+Glue -0.05817500000000002
+WordPenalty 0.12758486142112804
+PhraseModel_5 0.02435
diff --git a/training/dtrain/examples/parallelized/work/weights.0.0 b/training/dtrain/examples/parallelized/work/weights.0.0
index 91eedc7b..be386c62 100644
--- a/training/dtrain/examples/parallelized/work/weights.0.0
+++ b/training/dtrain/examples/parallelized/work/weights.0.0
@@ -1,12 +1,11 @@
-PassThrough -0.082000000000001058
-Glue 0.25700000000000267
-LanguageModel_OOV -0.046000000000000034
-LanguageModel 0.67341721152744249
-PhraseModel_6 0.18290000000000028
-PhraseModel_5 0.0039999999999999975
-PhraseModel_4 0.47916377173928498
-PhraseModel_3 0.65577926367715722
-PhraseModel_2 0.00060731048591637909
-PhraseModel_0 0.25329462707903372
-WordPenalty 0.026926257878001431
-PhraseModel_1 0.20035945197369062
+WordPenalty -0.017632355965271129
+LanguageModel 0.72957628464102753
+LanguageModel_OOV -0.23499999999999999
+PhraseModel_0 -0.43720953659541578
+PhraseModel_1 1.0100170838129212
+PhraseModel_2 1.3524984123857073
+PhraseModel_3 -0.25541132249775761
+PhraseModel_4 -0.78115161368856911
+PhraseModel_6 -0.36810000000000004
+Glue 0.34040000000000004
+PassThrough -0.33040000000000003
diff --git a/training/dtrain/examples/parallelized/work/weights.0.1 b/training/dtrain/examples/parallelized/work/weights.0.1
index 6fcc9999..d4c77d07 100644
--- a/training/dtrain/examples/parallelized/work/weights.0.1
+++ b/training/dtrain/examples/parallelized/work/weights.0.1
@@ -1,12 +1,12 @@
-PassThrough -0.2346750000000028
-Glue -0.17904999999999763
-WordPenalty 0.062125825636256168
-LanguageModel 0.66824625053667575
-LanguageModel_OOV -0.15247500000000355
-PhraseModel_0 -0.5581144363944085
-PhraseModel_1 0.12740874153205478
-PhraseModel_2 0.6038779278708799
-PhraseModel_3 -0.44463820299544454
-PhraseModel_4 -0.63136538282212662
-PhraseModel_5 -0.0084000000000000324
-PhraseModel_6 -0.20164999999999911
+WordPenalty 0.12966947493426365
+LanguageModel 0.3989224621154368
+LanguageModel_OOV -0.63139999999999996
+PhraseModel_0 -0.63991953012355962
+PhraseModel_1 0.74197897612368646
+PhraseModel_2 1.3096163833051435
+PhraseModel_3 -0.12160001974680773
+PhraseModel_4 -1.2274031286515816
+PhraseModel_5 0.02435
+PhraseModel_6 -0.210925
+Glue -0.40907500000000002
+PassThrough -0.66155000000000008
diff --git a/training/dtrain/examples/parallelized/work/weights.0.2 b/training/dtrain/examples/parallelized/work/weights.0.2
index 5668915d..8ce1449b 100644
--- a/training/dtrain/examples/parallelized/work/weights.0.2
+++ b/training/dtrain/examples/parallelized/work/weights.0.2
@@ -1,12 +1,12 @@
-PassThrough -0.38122499999999337
-Glue -0.019274999999998679
-WordPenalty 0.022192448025253487
-LanguageModel 0.4068780855136106
-LanguageModel_OOV -0.363974999999992
-PhraseModel_0 -0.36273429313029715
-PhraseModel_1 0.56431752511029298
-PhraseModel_2 0.85638010019687694
-PhraseModel_3 -0.20222345248738063
-PhraseModel_4 -0.48295466434310252
-PhraseModel_5 0.031450000000000339
-PhraseModel_6 -0.26092499999998625
+WordPenalty 0.10319922626226019
+LanguageModel 0.6647396869692952
+LanguageModel_OOV -0.622525
+PhraseModel_0 -0.59993441316076157
+PhraseModel_1 0.78991513935858193
+PhraseModel_2 1.3148638774685031
+PhraseModel_3 0.2143393571820455
+PhraseModel_4 -1.0173894637028262
+PhraseModel_5 0.02435
+PhraseModel_6 -0.18452499999999999
+Glue -0.44422499999999998
+PassThrough -0.65267500000000012
diff --git a/training/dtrain/examples/parallelized/work/weights.1 b/training/dtrain/examples/parallelized/work/weights.1
index f52e07b8..2a00be2e 100644
--- a/training/dtrain/examples/parallelized/work/weights.1
+++ b/training/dtrain/examples/parallelized/work/weights.1
@@ -1,12 +1,12 @@
-LanguageModel 0.7527067666152598
-PhraseModel_4 -0.6467221787583058
-PhraseModel_2 0.36889175522051015
-PhraseModel_0 -0.38227173053779245
-PhraseModel_3 -0.2252732111174934
-LanguageModel_OOV -0.25227499999999975
-PassThrough -0.2695250000000011
-PhraseModel_1 0.03521067244127414
-Glue -0.1579749999999981
-PhraseModel_6 -0.11932500000000047
-WordPenalty 0.0650573133891042
-PhraseModel_5 0.03475000000000043
+PhraseModel_4 -1.1379250444170055
+PhraseModel_2 1.0578050661336098
+LanguageModel 0.9343385461706668
+PhraseModel_0 -0.6917392152965985
+PhraseModel_1 0.4508371141128957
+PassThrough -0.4411750000000001
+Glue -0.265425
+LanguageModel_OOV -0.411025
+PhraseModel_3 -0.186390082624459
+PhraseModel_6 -0.188225
+WordPenalty 0.09781397468665984
+PhraseModel_5 0.02435
diff --git a/training/dtrain/examples/parallelized/work/weights.1.0 b/training/dtrain/examples/parallelized/work/weights.1.0
index 31e08d81..cdcf959e 100644
--- a/training/dtrain/examples/parallelized/work/weights.1.0
+++ b/training/dtrain/examples/parallelized/work/weights.1.0
@@ -1,11 +1,12 @@
-LanguageModel_OOV -0.044300000000000235
-PassThrough -0.19300000000000087
-PhraseModel_6 -0.18180000000000701
-LanguageModel 1.3644969337716422
-PhraseModel_3 0.017250706134911725
-PhraseModel_4 -1.5473728273858063
-Glue -0.32289999999999447
-PhraseModel_1 -0.077697953502182365
-WordPenalty 0.27968564634568688
-PhraseModel_0 -0.19048660891012237
-PhraseModel_2 0.05889844333199834
+WordPenalty 0.05433023968609621
+LanguageModel 0.69947965605855011
+LanguageModel_OOV -0.2641
+PhraseModel_0 -1.4207505705360111
+PhraseModel_1 -1.563047680441811
+PhraseModel_2 -0.21050528366541305
+PhraseModel_3 -0.17764037275860439
+PhraseModel_4 -1.6583462458159566
+PhraseModel_5 0.079399999999999998
+PhraseModel_6 0.15280000000000002
+Glue -0.27220000000000005
+PassThrough -0.23670000000000002
diff --git a/training/dtrain/examples/parallelized/work/weights.1.1 b/training/dtrain/examples/parallelized/work/weights.1.1
index 544ff462..c1bb2cf0 100644
--- a/training/dtrain/examples/parallelized/work/weights.1.1
+++ b/training/dtrain/examples/parallelized/work/weights.1.1
@@ -1,12 +1,12 @@
-PassThrough -0.42167499999999858
-Glue -0.26424999999999721
-WordPenalty 0.04788096662983269
-LanguageModel 0.78495517342352483
-LanguageModel_OOV -0.49307499999999477
-PhraseModel_0 -0.58703462849498356
-PhraseModel_1 -0.33425278954714266
-PhraseModel_2 0.20834221229630179
-PhraseModel_3 -0.043345645640208569
-PhraseModel_4 -0.60760531115816907
-PhraseModel_5 0.12300000000000186
-PhraseModel_6 -0.054150000000001031
+WordPenalty -0.0091744709302067785
+LanguageModel 0.79433413663506514
+LanguageModel_OOV -0.43090000000000001
+PhraseModel_0 -0.56242499947237046
+PhraseModel_1 0.85362516703032698
+PhraseModel_2 1.3457900890481096
+PhraseModel_3 -0.13095079554478939
+PhraseModel_4 -0.94761908497413061
+PhraseModel_5 0.02435
+PhraseModel_6 -0.160025
+Glue -0.20487500000000003
+PassThrough -0.46105000000000007
diff --git a/training/dtrain/examples/parallelized/work/weights.1.2 b/training/dtrain/examples/parallelized/work/weights.1.2
index ac3284b9..c9598a04 100644
--- a/training/dtrain/examples/parallelized/work/weights.1.2
+++ b/training/dtrain/examples/parallelized/work/weights.1.2
@@ -1,12 +1,12 @@
-PassThrough -0.34762500000000224
-Glue -0.23607500000000026
-WordPenalty 0.10931192109504413
-LanguageModel 0.81339027211983694
-LanguageModel_OOV -0.33237500000000098
-PhraseModel_0 -0.53685104648974269
-PhraseModel_1 -0.049657790506137042
-PhraseModel_2 0.40277066454544108
-PhraseModel_3 0.14600791389785803
-PhraseModel_4 -0.72850673041349101
-PhraseModel_5 0.034750000000000433
-PhraseModel_6 -0.27192499999999448
+WordPenalty 0.076359827280638559
+LanguageModel 1.3183380272921175
+LanguageModel_OOV -0.60902499999999993
+PhraseModel_0 -0.2248075206657828
+PhraseModel_1 0.86368802571834491
+PhraseModel_2 1.0746702462261808
+PhraseModel_3 0.18002263643876637
+PhraseModel_4 -0.84660750337519441
+PhraseModel_5 0.02435
+PhraseModel_6 0.11247499999999999
+Glue -0.49852500000000005
+PassThrough -0.63917500000000005
diff --git a/training/dtrain/examples/parallelized/work/weights.2 b/training/dtrain/examples/parallelized/work/weights.2
index dedaf165..310973ec 100644
--- a/training/dtrain/examples/parallelized/work/weights.2
+++ b/training/dtrain/examples/parallelized/work/weights.2
@@ -1,12 +1,12 @@
-PhraseModel_2 0.6558266927225778
-PhraseModel_4 -0.6161090299356294
-LanguageModel 0.5690697644415413
-PhraseModel_1 0.32098232482479416
-PhraseModel_0 -0.39422813904895143
-PassThrough -0.37879999999999764
-LanguageModel_OOV -0.3620499999999963
-Glue -0.1792249999999967
-PhraseModel_6 -0.18769999999999526
-PhraseModel_3 -0.10321074877850786
-WordPenalty 0.05867318450512617
-PhraseModel_5 0.03392500000000041
+PhraseModel_2 1.185520780812669
+PhraseModel_4 -1.0801541070647134
+LanguageModel 0.7741099486587568
+PhraseModel_0 -0.6265095873268189
+PhraseModel_1 0.6260421233840029
+PassThrough -0.5630000000000002
+Glue -0.495325
+LanguageModel_OOV -0.53285
+PhraseModel_3 -0.008805626854390465
+PhraseModel_6 -0.10977500000000001
+WordPenalty 0.1060655698428214
+PhraseModel_5 0.026699999999999998
diff --git a/training/dtrain/examples/parallelized/work/weights.2.0 b/training/dtrain/examples/parallelized/work/weights.2.0
index f7ece54d..3e87fed4 100644
--- a/training/dtrain/examples/parallelized/work/weights.2.0
+++ b/training/dtrain/examples/parallelized/work/weights.2.0
@@ -1,11 +1,11 @@
-LanguageModel_OOV -0.12280000000000209
-PassThrough -0.13350000000000165
-Glue -0.1259000000000001
-PhraseModel_1 0.24792740418949952
-WordPenalty 0.048293546387642321
-PhraseModel_0 0.26356693580129958
-PhraseModel_2 0.0063762787517740458
-PhraseModel_3 -0.18966358382769741
-PhraseModel_4 -0.22599681869670471
-PhraseModel_6 0.074299999999999047
-LanguageModel 0.3625416478537038
+WordPenalty 0.14922358398195767
+LanguageModel 0.79685677298009394
+LanguageModel_OOV -0.66270000000000007
+PhraseModel_0 0.37998874905310187
+PhraseModel_1 0.69213063228111271
+PhraseModel_2 0.34219807728516061
+PhraseModel_3 1.1425846772648622
+PhraseModel_4 -0.55412548521619742
+PhraseModel_6 0.067599999999999993
+Glue -0.21090000000000003
+PassThrough -0.63429999999999997
diff --git a/training/dtrain/examples/parallelized/work/weights.2.1 b/training/dtrain/examples/parallelized/work/weights.2.1
index 0946609d..d129dc49 100644
--- a/training/dtrain/examples/parallelized/work/weights.2.1
+++ b/training/dtrain/examples/parallelized/work/weights.2.1
@@ -1,12 +1,12 @@
-PassThrough -0.23927500000000015
-Glue -0.10384999999999919
-WordPenalty 0.038717353061671053
-LanguageModel 0.49412782572695274
-LanguageModel_OOV -0.24887499999999915
-PhraseModel_0 -0.32101572713801541
-PhraseModel_1 0.34413149733472631
-PhraseModel_2 0.62365535622061474
-PhraseModel_3 -0.49337445280658987
-PhraseModel_4 -0.77004673375347765
-PhraseModel_5 0.0069999999999999767
-PhraseModel_6 -0.05055000000000108
+WordPenalty 0.1593752174964457
+LanguageModel 1.5897162231676281
+LanguageModel_OOV -0.52100000000000002
+PhraseModel_0 -0.5834836741748588
+PhraseModel_1 0.29827543837280185
+PhraseModel_2 0.78493316593562568
+PhraseModel_3 0.083221832554333464
+PhraseModel_4 -0.93843312963279457
+PhraseModel_5 0.02435
+PhraseModel_6 -0.27382499999999999
+Glue -0.76607500000000006
+PassThrough -0.55115000000000003
diff --git a/training/dtrain/examples/parallelized/work/weights.2.2 b/training/dtrain/examples/parallelized/work/weights.2.2
index b766fc75..bcc83b44 100644
--- a/training/dtrain/examples/parallelized/work/weights.2.2
+++ b/training/dtrain/examples/parallelized/work/weights.2.2
@@ -1,12 +1,12 @@
-PassThrough -0.46082499999999499
-Glue -0.32907499999998979
-WordPenalty 0.049596429833348527
-LanguageModel 0.33496341201347335
-LanguageModel_OOV -0.44357499999999361
-PhraseModel_0 -0.30679883980783829
-PhraseModel_1 0.5937585900939707
-PhraseModel_2 0.86415970329021152
-PhraseModel_3 -0.21072279838022553
-PhraseModel_4 -0.65734339854224544
-PhraseModel_5 0.034750000000000433
-PhraseModel_6 -0.10652500000000011
+WordPenalty 0.10819361280414735
+LanguageModel 0.52389743342585859
+LanguageModel_OOV -0.41622500000000001
+PhraseModel_0 -0.86867995703334211
+PhraseModel_1 0.40783818771767943
+PhraseModel_2 1.1792706530114188
+PhraseModel_3 -0.2469805689928464
+PhraseModel_4 -1.2352895858909159
+PhraseModel_5 0.033750000000000002
+PhraseModel_6 -0.17882500000000001
+Glue -0.90862500000000002
+PassThrough -0.44637500000000013
diff --git a/training/dtrain/examples/parallelized/work/weights.3.0 b/training/dtrain/examples/parallelized/work/weights.3.0
index 403ffbb3..e3586048 100644
--- a/training/dtrain/examples/parallelized/work/weights.3.0
+++ b/training/dtrain/examples/parallelized/work/weights.3.0
@@ -1,12 +1,12 @@
-PhraseModel_4 -1.501862388574505
-PhraseModel_3 -0.77386695951256013
-PhraseModel_6 -0.51399999999999824
-PhraseModel_5 0.02399999999999991
-LanguageModel 1.1666837562322641
-PhraseModel_2 0.036204776972598059
-PassThrough 0.030999999999999975
-Glue 0.046000000000000582
-PhraseModel_1 -0.98829728889588764
-WordPenalty 0.1732834982793964
-PhraseModel_0 -1.1693779885763822
-LanguageModel_OOV 0.066000000000000086
+WordPenalty 0.32441797798172944
+LanguageModel 2.5769043236821889
+LanguageModel_OOV -0.0090000000000000011
+PhraseModel_0 -0.58972189365343919
+PhraseModel_1 0.063690869987073351
+PhraseModel_2 0.53660363110809217
+PhraseModel_3 0.12867071310286207
+PhraseModel_4 -1.9801291745988916
+PhraseModel_5 0.018000000000000002
+PhraseModel_6 -0.48600000000000004
+Glue -0.090000000000000011
+PassThrough -0.090000000000000011
diff --git a/training/dtrain/examples/parallelized/work/weights.3.1 b/training/dtrain/examples/parallelized/work/weights.3.1
index c171d586..b27687d3 100644
--- a/training/dtrain/examples/parallelized/work/weights.3.1
+++ b/training/dtrain/examples/parallelized/work/weights.3.1
@@ -1,12 +1,12 @@
-PassThrough -0.18247500000000313
-Glue -0.084749999999998368
-WordPenalty 0.11150510822865688
-LanguageModel 1.063497816773886
-LanguageModel_OOV -0.1146750000000015
-PhraseModel_0 -0.062922130123762257
-PhraseModel_1 0.0035552404454581212
-PhraseModel_2 0.039691524494244249
-PhraseModel_3 0.080265456972269417
-PhraseModel_4 -0.57787128729945014
-PhraseModel_5 0.017399999999999922
-PhraseModel_6 -0.17095000000000066
+WordPenalty 0.11138567724613679
+LanguageModel 0.95438136276453733
+LanguageModel_OOV -0.060799999999999937
+PhraseModel_0 -0.98112865741560529
+PhraseModel_1 -0.090531125075232435
+PhraseModel_2 0.79088062624556033
+PhraseModel_3 -0.57623134776057228
+PhraseModel_4 -1.4382448344095151
+PhraseModel_5 0.02435
+PhraseModel_6 -0.108125
+Glue 0.31832499999999997
+PassThrough -0.090950000000000003
diff --git a/training/dtrain/examples/parallelized/work/weights.3.2 b/training/dtrain/examples/parallelized/work/weights.3.2
index 3ff0411d..ccb591a2 100644
--- a/training/dtrain/examples/parallelized/work/weights.3.2
+++ b/training/dtrain/examples/parallelized/work/weights.3.2
@@ -1,12 +1,12 @@
-PassThrough -0.32552500000000006
-Glue -0.13247499999999815
-WordPenalty 0.053591939066858545
-LanguageModel 0.72104728811924446
-LanguageModel_OOV -0.30827499999999869
-PhraseModel_0 -0.37052837676792744
-PhraseModel_1 0.17551097460105014
-PhraseModel_2 0.49999630285778179
-PhraseModel_3 -0.14590465814428336
-PhraseModel_4 -0.59563132644367889
-PhraseModel_5 0.034750000000000433
-PhraseModel_6 -0.11142500000000025
+WordPenalty 0.13650961302423945
+LanguageModel 0.58946464694775647
+LanguageModel_OOV -0.48362499999999997
+PhraseModel_0 -0.81261645844738917
+PhraseModel_1 0.44272714074140529
+PhraseModel_2 1.1732783465445731
+PhraseModel_3 -0.18260393204552733
+PhraseModel_4 -1.2213298752899167
+PhraseModel_5 0.02435
+PhraseModel_6 -0.188225
+Glue -0.12992500000000001
+PassThrough -0.51377500000000009