path: root/training/dtrain/test/example/expected-output
author    Chris Dyer <cdyer@allegro.clab.cs.cmu.edu>  2012-11-18 13:35:42 -0500
committer Chris Dyer <cdyer@allegro.clab.cs.cmu.edu>  2012-11-18 13:35:42 -0500
commit    8aa29810bb77611cc20b7a384897ff6703783ea1 (patch)
tree      8635daa8fffb3f2cd90e30b41e27f4f9e0909447 /training/dtrain/test/example/expected-output
parent    fbdacabc85bea65d735f2cb7f92b98e08ce72d04 (diff)
major restructure of the training code
Diffstat (limited to 'training/dtrain/test/example/expected-output')
-rw-r--r-- training/dtrain/test/example/expected-output | 89
1 files changed, 89 insertions, 0 deletions
diff --git a/training/dtrain/test/example/expected-output b/training/dtrain/test/example/expected-output
new file mode 100644
index 00000000..05326763
--- /dev/null
+++ b/training/dtrain/test/example/expected-output
@@ -0,0 +1,89 @@
+ cdec cfg 'test/example/cdec.ini'
+Loading the LM will be faster if you build a binary file.
+Reading test/example/nc-wmt11.en.srilm.gz
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+****************************************************************************************************
+ Example feature: Shape_S00000_T00000
+Seeding random number sequence to 2912000813
+
+dtrain
+Parameters:
+ k 100
+ N 4
+ T 2
+ scorer 'stupid_bleu'
+ sample from 'kbest'
+ filter 'uniq'
+ learning rate 1
+ gamma 0
+ loss margin 0
+ pairs 'XYX'
+ hi lo 0.1
+ pair threshold 0
+ select weights 'VOID'
+ l1 reg 0 'none'
+ max pairs 4294967295
+ cdec cfg 'test/example/cdec.ini'
+ input 'test/example/nc-wmt11.1k.gz'
+ output '-'
+ stop_after 10
+(a dot represents 10 inputs)
+Iteration #1 of 2.
+ . 10
+Stopping after 10 input sentences.
+WEIGHTS
+ Glue = -637
+ WordPenalty = +1064
+ LanguageModel = +1175.3
+ LanguageModel_OOV = -1437
+ PhraseModel_0 = +1935.6
+ PhraseModel_1 = +2499.3
+ PhraseModel_2 = +964.96
+ PhraseModel_3 = +1410.8
+ PhraseModel_4 = -5977.9
+ PhraseModel_5 = +522
+ PhraseModel_6 = +1089
+ PassThrough = -1308
+ ---
+ 1best avg score: 0.16963 (+0.16963)
+ 1best avg model score: 64485 (+64485)
+ avg # pairs: 1494.4
+ avg # rank err: 702.6
+ avg # margin viol: 0
+ non0 feature count: 528
+ avg list sz: 85.7
+ avg f count: 102.75
+(time 0.083 min, 0.5 s/S)
+
+Iteration #2 of 2.
+ . 10
+WEIGHTS
+ Glue = -1196
+ WordPenalty = +809.52
+ LanguageModel = +3112.1
+ LanguageModel_OOV = -1464
+ PhraseModel_0 = +3895.5
+ PhraseModel_1 = +4683.4
+ PhraseModel_2 = +1092.8
+ PhraseModel_3 = +1079.6
+ PhraseModel_4 = -6827.7
+ PhraseModel_5 = -888
+ PhraseModel_6 = +142
+ PassThrough = -1335
+ ---
+ 1best avg score: 0.277 (+0.10736)
+ 1best avg model score: -3110.5 (-67595)
+ avg # pairs: 1144.2
+ avg # rank err: 529.1
+ avg # margin viol: 0
+ non0 feature count: 859
+ avg list sz: 74.9
+ avg f count: 112.84
+(time 0.067 min, 0.4 s/S)
+
+Writing weights file to '-' ...
+done
+
+---
+Best iteration: 2 [SCORE 'stupid_bleu'=0.277].
+This took 0.15 min.
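
The file added above is the reference log for the dtrain example run: the parameter dump, the per-iteration weight vectors, and the per-iteration scoring statistics. A minimal sketch of how a fresh run's output could be diffed against this expected-output file follows; it masks lines that obviously vary between runs (the RNG seed and the timing lines). The script, its name, and its argument order are assumptions for illustration, not part of this commit.

#!/usr/bin/env python
# compare_expected.py (hypothetical helper, not part of the commit):
# usage: compare_expected.py expected-output actual-output
# Compares a fresh dtrain run's log against the checked-in expected-output,
# skipping lines that legitimately differ from run to run.
import re
import sys

# Lines assumed to vary between runs: random seed and wall-clock timings.
VOLATILE = [
    re.compile(r'^Seeding random number sequence'),
    re.compile(r'^\(time '),
    re.compile(r'^This took '),
]

def is_volatile(line):
    return any(p.match(line) for p in VOLATILE)

def load(path):
    with open(path) as f:
        return [l.rstrip('\n') for l in f if not is_volatile(l)]

if __name__ == '__main__':
    expected, actual = load(sys.argv[1]), load(sys.argv[2])
    for i, (e, a) in enumerate(zip(expected, actual), 1):
        if e != a:
            print('line %d differs:\n  expected: %s\n  actual:   %s' % (i, e, a))
            sys.exit(1)
    if len(expected) != len(actual):
        print('length mismatch: %d vs %d lines' % (len(expected), len(actual)))
        sys.exit(1)
    print('output matches expected-output')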