From b6754386f1109b960b05cdf2eabbc97bdd38e8df Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Mon, 29 Apr 2013 15:24:39 +0200
Subject: fix, cleaned up headers

---
 training/dtrain/examples/standard/dtrain.ini      | 24 +++----
 training/dtrain/examples/standard/expected-output | 84 +++++++++++------------
 2 files changed, 54 insertions(+), 54 deletions(-)

(limited to 'training/dtrain/examples/standard')

diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index e1072d30..23e94285 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -10,15 +10,15 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
 stop_after=10 # stop epoch after 10 inputs
 
 # interesting stuff
-epochs=2            # run over input 2 times
-k=100               # use 100best lists
-N=4                 # optimize (approx) BLEU4
-scorer=stupid_bleu  # use 'stupid' BLEU+1
-learning_rate=1.0   # learning rate, don't care if gamma=0 (perceptron)
-gamma=0             # use SVM reg
-sample_from=kbest   # use kbest lists (as opposed to forest)
-filter=uniq         # only unique entries in kbest (surface form)
-pair_sampling=XYX   #
-hi_lo=0.1           # 10 vs 80 vs 10 and 80 vs 10 here
-pair_threshold=0    # minimum distance in BLEU (here: > 0)
-loss_margin=0       # update if correctly ranked, but within this margin
+epochs=2                  # run over input 2 times
+k=100                     # use 100best lists
+N=4                       # optimize (approx) BLEU4
+scorer=fixed_stupid_bleu  # use 'stupid' BLEU+1
+learning_rate=1.0         # learning rate, don't care if gamma=0 (perceptron)
+gamma=0                   # use SVM reg
+sample_from=kbest         # use kbest lists (as opposed to forest)
+filter=uniq               # only unique entries in kbest (surface form)
+pair_sampling=XYX         #
+hi_lo=0.1                 # 10 vs 80 vs 10 and 80 vs 10 here
+pair_threshold=0          # minimum distance in BLEU (here: > 0)
+loss_margin=0             # update if correctly ranked, but within this margin
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index 7cd09dbf..9a25062b 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,14 +4,14 @@ Reading ./nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
 Example feature: Shape_S00000_T00000
-Seeding random number sequence to 2679584485
+Seeding random number sequence to 1677737427
 
 dtrain
 Parameters:
   k 100
   N 4
   T 2
-  scorer 'stupid_bleu'
+  scorer 'fixed_stupid_bleu'
   sample from 'kbest'
   filter 'uniq'
   learning rate 1
@@ -34,58 +34,58 @@ Iteration #1 of 2.
 . 10
 Stopping after 10 input sentences.
 WEIGHTS
- Glue = -576
- WordPenalty = +417.79
- LanguageModel = +5117.5
- LanguageModel_OOV = -1307
- PhraseModel_0 = -1612
- PhraseModel_1 = -2159.6
- PhraseModel_2 = -677.36
- PhraseModel_3 = +2663.8
- PhraseModel_4 = -1025.9
- PhraseModel_5 = -8
- PhraseModel_6 = +70
- PassThrough = -1455
+ Glue = -1155
+ WordPenalty = -329.63
+ LanguageModel = +3903
+ LanguageModel_OOV = -1630
+ PhraseModel_0 = +2746.9
+ PhraseModel_1 = +1200.3
+ PhraseModel_2 = -1004.1
+ PhraseModel_3 = +2223.1
+ PhraseModel_4 = +551.58
+ PhraseModel_5 = +217
+ PhraseModel_6 = +1816
+ PassThrough = -1603
 ---
- 1best avg score: 0.27697 (+0.27697)
- 1best avg model score: -47918 (-47918)
- avg # pairs: 581.9 (meaningless)
- avg # rank err: 581.9
+ 1best avg score: 0.19344 (+0.19344)
+ 1best avg model score: 81387 (+81387)
+ avg # pairs: 616.3 (meaningless)
+ avg # rank err: 616.3
 avg # margin viol: 0
- non0 feature count: 703
+ non0 feature count: 673
 avg list sz: 90.9
- avg f count: 100.09
-(time 0.25 min, 1.5 s/S)
+ avg f count: 104.26
+(time 0.38 min, 2.3 s/S)
 
 Iteration #2 of 2.
 . 10
 WEIGHTS
- Glue = -622
- WordPenalty = +898.56
- LanguageModel = +8066.2
- LanguageModel_OOV = -2590
- PhraseModel_0 = -4335.8
- PhraseModel_1 = -5864.4
- PhraseModel_2 = -1729.8
- PhraseModel_3 = +2831.9
- PhraseModel_4 = -5384.8
- PhraseModel_5 = +1449
- PhraseModel_6 = +480
- PassThrough = -2578
+ Glue = -994
+ WordPenalty = -778.69
+ LanguageModel = +2348.9
+ LanguageModel_OOV = -1967
+ PhraseModel_0 = -412.72
+ PhraseModel_1 = +1428.9
+ PhraseModel_2 = +1967.4
+ PhraseModel_3 = -944.99
+ PhraseModel_4 = -239.7
+ PhraseModel_5 = +708
+ PhraseModel_6 = +645
+ PassThrough = -1866
 ---
- 1best avg score: 0.37119 (+0.094226)
- 1best avg model score: -1.3174e+05 (-83822)
- avg # pairs: 584.1 (meaningless)
- avg # rank err: 584.1
+ 1best avg score: 0.22395 (+0.03051)
+ 1best avg model score: -31388 (-1.1278e+05)
+ avg # pairs: 702.3 (meaningless)
+ avg # rank err: 702.3
 avg # margin viol: 0
- non0 feature count: 1115
+ non0 feature count: 955
 avg list sz: 91.3
- avg f count: 90.755
-(time 0.3 min, 1.8 s/S)
+ avg f count: 103.45
+(time 0.32 min, 1.9 s/S)
 
 Writing weights file to '-' ... done
 ---
-Best iteration: 2 [SCORE 'stupid_bleu'=0.37119].
-This took 0.55 min.
+Best iteration: 2 [SCORE 'fixed_stupid_bleu'=0.22395].
+This took 0.7 min.
--
cgit v1.2.3


From f9a28c1e84e0149e301c7fc019e0fb9f6b2fd6c7 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Thu, 2 May 2013 10:04:07 +0200
Subject: updated example

---
 training/dtrain/examples/standard/expected-output | 82 +++++++++++------------
 1 file changed, 41 insertions(+), 41 deletions(-)

(limited to 'training/dtrain/examples/standard')

diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index 9a25062b..21f91244 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,7 +4,7 @@ Reading ./nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
 Example feature: Shape_S00000_T00000
-Seeding random number sequence to 1677737427
+Seeding random number sequence to 970626287
 
 dtrain
 Parameters:
@@ -34,58 +34,58 @@ Iteration #1 of 2.
 . 10
 Stopping after 10 input sentences.
 WEIGHTS
- Glue = -1155
- WordPenalty = -329.63
- LanguageModel = +3903
- LanguageModel_OOV = -1630
- PhraseModel_0 = +2746.9
- PhraseModel_1 = +1200.3
- PhraseModel_2 = -1004.1
- PhraseModel_3 = +2223.1
- PhraseModel_4 = +551.58
- PhraseModel_5 = +217
- PhraseModel_6 = +1816
- PassThrough = -1603
+ Glue = -614
+ WordPenalty = +1256.8
+ LanguageModel = +5610.5
+ LanguageModel_OOV = -1449
+ PhraseModel_0 = -2107
+ PhraseModel_1 = -4666.1
+ PhraseModel_2 = -2713.5
+ PhraseModel_3 = +4204.3
+ PhraseModel_4 = -1435.8
+ PhraseModel_5 = +916
+ PhraseModel_6 = +190
+ PassThrough = -2527
 ---
- 1best avg score: 0.19344 (+0.19344)
- 1best avg model score: 81387 (+81387)
- avg # pairs: 616.3 (meaningless)
- avg # rank err: 616.3
+ 1best avg score: 0.17874 (+0.17874)
+ 1best avg model score: 88399 (+88399)
+ avg # pairs: 798.2 (meaningless)
+ avg # rank err: 798.2
 avg # margin viol: 0
- non0 feature count: 673
- avg list sz: 90.9
- avg f count: 104.26
-(time 0.38 min, 2.3 s/S)
+ non0 feature count: 887
+ avg list sz: 91.3
+ avg f count: 126.85
+(time 0.33 min, 2 s/S)
 
 Iteration #2 of 2.
 . 10
 WEIGHTS
- Glue = -994
- WordPenalty = -778.69
- LanguageModel = +2348.9
- LanguageModel_OOV = -1967
- PhraseModel_0 = -412.72
- PhraseModel_1 = +1428.9
- PhraseModel_2 = +1967.4
- PhraseModel_3 = -944.99
- PhraseModel_4 = -239.7
- PhraseModel_5 = +708
- PhraseModel_6 = +645
- PassThrough = -1866
+ Glue = -1025
+ WordPenalty = +1751.5
+ LanguageModel = +10059
+ LanguageModel_OOV = -4490
+ PhraseModel_0 = -2640.7
+ PhraseModel_1 = -3757.4
+ PhraseModel_2 = -1133.1
+ PhraseModel_3 = +1837.3
+ PhraseModel_4 = -3534.3
+ PhraseModel_5 = +2308
+ PhraseModel_6 = +1677
+ PassThrough = -6222
 ---
- 1best avg score: 0.22395 (+0.03051)
- 1best avg model score: -31388 (-1.1278e+05)
- avg # pairs: 702.3 (meaningless)
- avg # rank err: 702.3
+ 1best avg score: 0.30764 (+0.12891)
+ 1best avg model score: -2.5042e+05 (-3.3882e+05)
+ avg # pairs: 725.9 (meaningless)
+ avg # rank err: 725.9
 avg # margin viol: 0
- non0 feature count: 955
+ non0 feature count: 1499
 avg list sz: 91.3
- avg f count: 103.45
+ avg f count: 114.34
 (time 0.32 min, 1.9 s/S)
 
 Writing weights file to '-' ... done
 ---
-Best iteration: 2 [SCORE 'fixed_stupid_bleu'=0.22395].
-This took 0.7 min.
+Best iteration: 2 [SCORE 'fixed_stupid_bleu'=0.30764].
+This took 0.65 min.
--
cgit v1.2.3
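
The dtrain.ini above configures dtrain's pairwise-ranking perceptron: each 100-best list is scored with per-sentence (fixed) stupid BLEU, pairs are sampled with the XYX scheme (hi_lo=0.1), and the weights are updated whenever a lower-BLEU hypothesis is not ranked below a higher-BLEU one by more than loss_margin; gamma=0 switches off the SVM-style regularizer. The Python sketch below illustrates only that update rule; it is not dtrain's actual C++ implementation, the XYX sampling is simplified, and all function and variable names (xyx_pairs, perceptron_update, the kbest dicts) are made up for this example.

from collections import defaultdict


def xyx_pairs(kbest, hi_lo=0.1):
    # Simplified stand-in for dtrain's XYX pair sampling (hi_lo=0.1):
    # compare the top 10% of the k-best list (by per-sentence BLEU)
    # against the rest, and the middle against the bottom 10%.
    ranked = sorted(kbest, key=lambda h: h["bleu"], reverse=True)
    cut = max(1, int(len(ranked) * hi_lo))
    top, middle, bottom = ranked[:cut], ranked[cut:-cut], ranked[-cut:]
    for good in top:
        for bad in middle + bottom:
            yield good, bad
    for good in middle:
        for bad in bottom:
            yield good, bad


def perceptron_update(weights, kbest, learning_rate=1.0, loss_margin=0.0,
                      pair_threshold=0.0):
    # One pass over one k-best list with gamma=0 (plain perceptron, no SVM
    # regularizer), mirroring the settings in the dtrain.ini shown above.
    def model_score(hyp):
        return sum(weights[f] * v for f, v in hyp["features"].items())

    for good, bad in xyx_pairs(kbest):
        if good["bleu"] - bad["bleu"] <= pair_threshold:
            continue  # pair_threshold: skip pairs whose BLEU gap is too small
        # Update on a rank error, or on a correct ranking that is still
        # within loss_margin (with loss_margin=0: only on actual errors).
        if model_score(good) - model_score(bad) <= loss_margin:
            for f, v in good["features"].items():
                weights[f] += learning_rate * v
            for f, v in bad["features"].items():
                weights[f] -= learning_rate * v


if __name__ == "__main__":
    # Toy 2-best list; the real example uses 100-best lists from cdec.
    weights = defaultdict(float)
    kbest = [
        {"bleu": 0.40, "features": {"LanguageModel": 1.5, "Glue": 2.0}},
        {"bleu": 0.10, "features": {"LanguageModel": 0.2, "Glue": 5.0}},
    ]
    perceptron_update(weights, kbest)
    print(dict(weights))  # e.g. {'LanguageModel': 1.3, 'Glue': -3.0}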