From 86ea4ed498d96c1d988f2287afa580dcf558ddb0 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Sat, 19 Sep 2015 10:58:06 +0200
Subject: dtrain: removed old stuff

---
 training/dtrain/examples/parallelized/README | 5 -
 training/dtrain/examples/parallelized/cdec.ini | 22 --
 training/dtrain/examples/parallelized/dtrain.ini | 14 -
 .../examples/parallelized/grammar/grammar.out.0.gz | Bin 8318 -> 0 bytes
 .../examples/parallelized/grammar/grammar.out.1.gz | Bin 358560 -> 0 bytes
 .../examples/parallelized/grammar/grammar.out.2.gz | Bin 1014466 -> 0 bytes
 .../examples/parallelized/grammar/grammar.out.3.gz | Bin 391811 -> 0 bytes
 .../examples/parallelized/grammar/grammar.out.4.gz | Bin 149590 -> 0 bytes
 .../examples/parallelized/grammar/grammar.out.5.gz | Bin 537024 -> 0 bytes
 .../examples/parallelized/grammar/grammar.out.6.gz | Bin 291286 -> 0 bytes
 .../examples/parallelized/grammar/grammar.out.7.gz | Bin 1038140 -> 0 bytes
 .../examples/parallelized/grammar/grammar.out.8.gz | Bin 419889 -> 0 bytes
 .../examples/parallelized/grammar/grammar.out.9.gz | Bin 409140 -> 0 bytes
 training/dtrain/examples/parallelized/in | 10 -
 training/dtrain/examples/parallelized/refs | 10 -
 training/dtrain/examples/parallelized/work/out.0.0 | 62 -----
 training/dtrain/examples/parallelized/work/out.0.1 | 63 -----
 training/dtrain/examples/parallelized/work/out.1.0 | 62 -----
 training/dtrain/examples/parallelized/work/out.1.1 | 63 -----
 .../dtrain/examples/parallelized/work/shard.0.0.in | 5 -
 .../examples/parallelized/work/shard.0.0.refs | 5 -
 .../dtrain/examples/parallelized/work/shard.1.0.in | 5 -
 .../examples/parallelized/work/shard.1.0.refs | 5 -
 .../dtrain/examples/parallelized/work/weights.0 | 12 -
 .../dtrain/examples/parallelized/work/weights.0.0 | 12 -
 .../dtrain/examples/parallelized/work/weights.0.1 | 12 -
 .../dtrain/examples/parallelized/work/weights.1 | 12 -
 .../dtrain/examples/parallelized/work/weights.1.0 | 11 -
 .../dtrain/examples/parallelized/work/weights.1.1 | 12 -
 training/dtrain/examples/standard/README | 2 -
 training/dtrain/examples/standard/cdec.ini | 27 --
 training/dtrain/examples/standard/dtrain.ini | 27 --
 training/dtrain/examples/standard/expected-output | 123 ---------
 training/dtrain/examples/standard/nc-wmt11.de.gz | Bin 58324 -> 0 bytes
 training/dtrain/examples/standard/nc-wmt11.en.gz | Bin 49600 -> 0 bytes
 .../dtrain/examples/standard/nc-wmt11.en.srilm.gz | Bin 16017291 -> 0 bytes
 .../dtrain/examples/standard/nc-wmt11.grammar.gz | Bin 1399924 -> 0 bytes
 training/dtrain/examples/standard/nc-wmt11.gz | Bin 113504 -> 0 bytes
 training/dtrain/examples/toy/cdec.ini | 4 -
 training/dtrain/examples/toy/dtrain.ini | 13 -
 training/dtrain/examples/toy/expected-output | 77 ------
 training/dtrain/examples/toy/grammar.gz | Bin 219 -> 0 bytes
 training/dtrain/examples/toy/src | 2 -
 training/dtrain/examples/toy/tgt | 2 -
 training/dtrain/kbestget.h | 88 -------
 training/dtrain/ksampler.h | 60 -----
 training/dtrain/pairsampling.h | 141 ----------
 training/dtrain/score.cc | 283 ---------------------
 48 files changed, 1251 deletions(-)
 delete mode 100644 training/dtrain/examples/parallelized/README
 delete mode 100644 training/dtrain/examples/parallelized/cdec.ini
 delete mode 100644 training/dtrain/examples/parallelized/dtrain.ini
 delete mode 100644 training/dtrain/examples/parallelized/grammar/grammar.out.0.gz
 delete mode 100644 training/dtrain/examples/parallelized/grammar/grammar.out.1.gz
 delete mode 100644 training/dtrain/examples/parallelized/grammar/grammar.out.2.gz
 delete mode 100644 training/dtrain/examples/parallelized/grammar/grammar.out.3.gz
 delete mode 100644 training/dtrain/examples/parallelized/grammar/grammar.out.4.gz
 delete mode 100644 training/dtrain/examples/parallelized/grammar/grammar.out.5.gz
 delete mode 100644 training/dtrain/examples/parallelized/grammar/grammar.out.6.gz
 delete mode 100644 training/dtrain/examples/parallelized/grammar/grammar.out.7.gz
 delete mode 100644 training/dtrain/examples/parallelized/grammar/grammar.out.8.gz
 delete mode 100644 training/dtrain/examples/parallelized/grammar/grammar.out.9.gz
 delete mode 100644 training/dtrain/examples/parallelized/in
 delete mode 100644 training/dtrain/examples/parallelized/refs
 delete mode 100644 training/dtrain/examples/parallelized/work/out.0.0
 delete mode 100644 training/dtrain/examples/parallelized/work/out.0.1
 delete mode 100644 training/dtrain/examples/parallelized/work/out.1.0
 delete mode 100644 training/dtrain/examples/parallelized/work/out.1.1
 delete mode 100644 training/dtrain/examples/parallelized/work/shard.0.0.in
 delete mode 100644 training/dtrain/examples/parallelized/work/shard.0.0.refs
 delete mode 100644 training/dtrain/examples/parallelized/work/shard.1.0.in
 delete mode 100644 training/dtrain/examples/parallelized/work/shard.1.0.refs
 delete mode 100644 training/dtrain/examples/parallelized/work/weights.0
 delete mode 100644 training/dtrain/examples/parallelized/work/weights.0.0
 delete mode 100644 training/dtrain/examples/parallelized/work/weights.0.1
 delete mode 100644 training/dtrain/examples/parallelized/work/weights.1
 delete mode 100644 training/dtrain/examples/parallelized/work/weights.1.0
 delete mode 100644 training/dtrain/examples/parallelized/work/weights.1.1
 delete mode 100644 training/dtrain/examples/standard/README
 delete mode 100644 training/dtrain/examples/standard/cdec.ini
 delete mode 100644 training/dtrain/examples/standard/dtrain.ini
 delete mode 100644 training/dtrain/examples/standard/expected-output
 delete mode 100644 training/dtrain/examples/standard/nc-wmt11.de.gz
 delete mode 100644 training/dtrain/examples/standard/nc-wmt11.en.gz
 delete mode 100644 training/dtrain/examples/standard/nc-wmt11.en.srilm.gz
 delete mode 100644 training/dtrain/examples/standard/nc-wmt11.grammar.gz
 delete mode 100644 training/dtrain/examples/standard/nc-wmt11.gz
 delete mode 100644 training/dtrain/examples/toy/cdec.ini
 delete mode 100644 training/dtrain/examples/toy/dtrain.ini
 delete mode 100644 training/dtrain/examples/toy/expected-output
 delete mode 100644 training/dtrain/examples/toy/grammar.gz
 delete mode 100644 training/dtrain/examples/toy/src
 delete mode 100644 training/dtrain/examples/toy/tgt
 delete mode 100644 training/dtrain/kbestget.h
 delete mode 100644 training/dtrain/ksampler.h
 delete mode 100644 training/dtrain/pairsampling.h
 delete mode 100644 training/dtrain/score.cc

diff --git a/training/dtrain/examples/parallelized/README b/training/dtrain/examples/parallelized/README
deleted file mode 100644
index 89715105..00000000
--- a/training/dtrain/examples/parallelized/README
+++ /dev/null
@@ -1,5 +0,0 @@
-run for example
- ../../parallelize.rb ./dtrain.ini 4 false 2 2 ./in ./refs
-
-final weights will be in the file work/weights.3
-
diff --git a/training/dtrain/examples/parallelized/cdec.ini b/training/dtrain/examples/parallelized/cdec.ini
deleted file mode 100644
index 5773029a..00000000
--- a/training/dtrain/examples/parallelized/cdec.ini
+++ /dev/null
@@ -1,22 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
-intersection_strategy=cube_pruning
-cubepruning_pop_limit=200
-scfg_max_span_limit=15
-feature_function=WordPenalty
-feature_function=KLanguageModel ../standard//nc-wmt11.en.srilm.gz
-#feature_function=ArityPenalty
-#feature_function=CMR2008ReorderingFeatures
-#feature_function=Dwarf
-#feature_function=InputIndicator
-#feature_function=LexNullJump
-#feature_function=NewJump
-#feature_function=NgramFeatures
-#feature_function=NonLatinCount
-#feature_function=OutputIndicator
-#feature_function=RuleIdentityFeatures
-#feature_function=RuleNgramFeatures
-#feature_function=RuleShape
-#feature_function=SourceSpanSizeFeatures
-#feature_function=SourceWordPenalty
-#feature_function=SpanFeatures
diff --git a/training/dtrain/examples/parallelized/dtrain.ini b/training/dtrain/examples/parallelized/dtrain.ini
deleted file mode 100644
index 0b0932d6..00000000
--- a/training/dtrain/examples/parallelized/dtrain.ini
+++ /dev/null
@@ -1,14 +0,0 @@
-k=100
-N=4
-learning_rate=0.0001
-gamma=0
-loss_margin=1.0
-epochs=1
-scorer=stupid_bleu
-sample_from=kbest
-filter=uniq
-pair_sampling=XYX
-hi_lo=0.1
-select_weights=last
-print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-decoder_config=cdec.ini
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.0.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.0.gz
deleted file mode 100644
index 1e28a24b..00000000
Binary files a/training/dtrain/examples/parallelized/grammar/grammar.out.0.gz and /dev/null differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.1.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.1.gz
deleted file mode 100644
index 372f5675..00000000
Binary files a/training/dtrain/examples/parallelized/grammar/grammar.out.1.gz and /dev/null differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.2.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.2.gz
deleted file mode 100644
index 145d0dc0..00000000
Binary files a/training/dtrain/examples/parallelized/grammar/grammar.out.2.gz and /dev/null differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.3.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.3.gz
deleted file mode 100644
index 105593ff..00000000
Binary files a/training/dtrain/examples/parallelized/grammar/grammar.out.3.gz and /dev/null differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.4.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.4.gz
deleted file mode 100644
index 30781f48..00000000
Binary files a/training/dtrain/examples/parallelized/grammar/grammar.out.4.gz and /dev/null differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.5.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.5.gz
deleted file mode 100644
index 834ee759..00000000
Binary files a/training/dtrain/examples/parallelized/grammar/grammar.out.5.gz and /dev/null differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.6.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.6.gz
deleted file mode 100644
index 2e76f348..00000000
Binary files a/training/dtrain/examples/parallelized/grammar/grammar.out.6.gz and /dev/null differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.7.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.7.gz
deleted file mode 100644
index 3741a887..00000000
Binary files a/training/dtrain/examples/parallelized/grammar/grammar.out.7.gz and /dev/null differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.8.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.8.gz
deleted file mode 100644
index ebf6bd0c..00000000
Binary files a/training/dtrain/examples/parallelized/grammar/grammar.out.8.gz and /dev/null differ
diff --git a/training/dtrain/examples/parallelized/grammar/grammar.out.9.gz b/training/dtrain/examples/parallelized/grammar/grammar.out.9.gz
deleted file mode 100644
index c1791059..00000000
Binary files a/training/dtrain/examples/parallelized/grammar/grammar.out.9.gz and /dev/null differ
diff --git a/training/dtrain/examples/parallelized/in b/training/dtrain/examples/parallelized/in
deleted file mode 100644
index 51d01fe7..00000000
--- a/training/dtrain/examples/parallelized/in
+++ /dev/null
@@ -1,10 +0,0 @@
-europas nach rassen geteiltes haus
-ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .
-der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .
-während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .
-eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .
-die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .
-das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .
-die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .
-der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .
-genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .
diff --git a/training/dtrain/examples/parallelized/refs b/training/dtrain/examples/parallelized/refs
deleted file mode 100644
index 632e27b0..00000000
--- a/training/dtrain/examples/parallelized/refs
+++ /dev/null
@@ -1,10 +0,0 @@
-europe 's divided racial house
-a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge .
-the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them .
-while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon .
-an aging population at home and ever more open borders imply increasing racial fragmentation in european countries .
-mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear .
-it will not , as america 's racial history clearly shows .
-race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes .
-the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths .
-this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us .
diff --git a/training/dtrain/examples/parallelized/work/out.0.0 b/training/dtrain/examples/parallelized/work/out.0.0
deleted file mode 100644
index c559dd4d..00000000
--- a/training/dtrain/examples/parallelized/work/out.0.0
+++ /dev/null
@@ -1,62 +0,0 @@
- cdec cfg 'cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
-Seeding random number sequence to 405292278
-
-dtrain
-Parameters:
- k 100
- N 4
- T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
- input 'work/shard.0.0.in'
- refs 'work/shard.0.0.refs'
- output 'work/weights.0.0'
-(a dot represents 10 inputs)
-Iteration #1 of 1.
- 5
-WEIGHTS
- Glue = +0.2663
- WordPenalty = -0.0079042
- LanguageModel = +0.44782
- LanguageModel_OOV = -0.0401
- PhraseModel_0 = -0.193
- PhraseModel_1 = +0.71321
- PhraseModel_2 = +0.85196
- PhraseModel_3 = -0.43986
- PhraseModel_4 = -0.44803
- PhraseModel_5 = -0.0538
- PhraseModel_6 = -0.1788
- PassThrough = -0.1477
- ---
- 1best avg score: 0.17521 (+0.17521)
- 1best avg model score: 21.556 (+21.556)
- avg # pairs: 1671.2
- avg # rank err: 1118.6
- avg # margin viol: 552.6
- non0 feature count: 12
- avg list sz: 100
- avg f count: 11.32
-(time 0.35 min, 4.2 s/S)
-
-Writing weights file to 'work/weights.0.0' ...
-done
-
----
-Best iteration: 1 [SCORE 'stupid_bleu'=0.17521].
-This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.0.1 b/training/dtrain/examples/parallelized/work/out.0.1
deleted file mode 100644
index 8bc7ea9c..00000000
--- a/training/dtrain/examples/parallelized/work/out.0.1
+++ /dev/null
@@ -1,63 +0,0 @@
- cdec cfg 'cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
-Seeding random number sequence to 43859692
-
-dtrain
-Parameters:
- k 100
- N 4
- T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
- input 'work/shard.0.0.in'
- refs 'work/shard.0.0.refs'
- output 'work/weights.0.1'
- weights in 'work/weights.0'
-(a dot represents 10 inputs)
-Iteration #1 of 1.
- 5
-WEIGHTS
- Glue = -0.2699
- WordPenalty = +0.080605
- LanguageModel = -0.026572
- LanguageModel_OOV = -0.30025
- PhraseModel_0 = -0.32076
- PhraseModel_1 = +0.67451
- PhraseModel_2 = +0.92
- PhraseModel_3 = -0.36402
- PhraseModel_4 = -0.592
- PhraseModel_5 = -0.0269
- PhraseModel_6 = -0.28755
- PassThrough = -0.33285
- ---
- 1best avg score: 0.26638 (+0.26638)
- 1best avg model score: 53.197 (+53.197)
- avg # pairs: 2028.6
- avg # rank err: 998.2
- avg # margin viol: 918.8
- non0 feature count: 12
- avg list sz: 100
- avg f count: 10.496
-(time 0.35 min, 4.2 s/S)
-
-Writing weights file to 'work/weights.0.1' ...
-done
-
----
-Best iteration: 1 [SCORE 'stupid_bleu'=0.26638].
-This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.0 b/training/dtrain/examples/parallelized/work/out.1.0
deleted file mode 100644
index 65d1e7dc..00000000
--- a/training/dtrain/examples/parallelized/work/out.1.0
+++ /dev/null
@@ -1,62 +0,0 @@
- cdec cfg 'cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
-Seeding random number sequence to 4126799437
-
-dtrain
-Parameters:
- k 100
- N 4
- T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
- input 'work/shard.1.0.in'
- refs 'work/shard.1.0.refs'
- output 'work/weights.1.0'
-(a dot represents 10 inputs)
-Iteration #1 of 1.
- 5
-WEIGHTS
- Glue = -0.3815
- WordPenalty = +0.20064
- LanguageModel = +0.95304
- LanguageModel_OOV = -0.264
- PhraseModel_0 = -0.22362
- PhraseModel_1 = +0.12254
- PhraseModel_2 = +0.26328
- PhraseModel_3 = +0.38018
- PhraseModel_4 = -0.48654
- PhraseModel_5 = +0
- PhraseModel_6 = -0.3645
- PassThrough = -0.2216
- ---
- 1best avg score: 0.10863 (+0.10863)
- 1best avg model score: -4.9841 (-4.9841)
- avg # pairs: 1345.4
- avg # rank err: 822.4
- avg # margin viol: 501
- non0 feature count: 11
- avg list sz: 100
- avg f count: 11.814
-(time 0.43 min, 5.2 s/S)
-
-Writing weights file to 'work/weights.1.0' ...
-done
-
----
-Best iteration: 1 [SCORE 'stupid_bleu'=0.10863].
-This took 0.43333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.1 b/training/dtrain/examples/parallelized/work/out.1.1
deleted file mode 100644
index f479fbbc..00000000
--- a/training/dtrain/examples/parallelized/work/out.1.1
+++ /dev/null
@@ -1,63 +0,0 @@
- cdec cfg 'cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading ../standard//nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
-Seeding random number sequence to 2112412848
-
-dtrain
-Parameters:
- k 100
- N 4
- T 1
- scorer 'stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 0.0001
- gamma 0
- loss margin 1
- faster perceptron 0
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
- input 'work/shard.1.0.in'
- refs 'work/shard.1.0.refs'
- output 'work/weights.1.1'
- weights in 'work/weights.0'
-(a dot represents 10 inputs)
-Iteration #1 of 1.
- 5
-WEIGHTS
- Glue = -0.3178
- WordPenalty = +0.11092
- LanguageModel = +0.17269
- LanguageModel_OOV = -0.13485
- PhraseModel_0 = -0.45371
- PhraseModel_1 = +0.38789
- PhraseModel_2 = +0.75311
- PhraseModel_3 = -0.38163
- PhraseModel_4 = -0.58817
- PhraseModel_5 = -0.0269
- PhraseModel_6 = -0.27315
- PassThrough = -0.16745
- ---
- 1best avg score: 0.13169 (+0.13169)
- 1best avg model score: 24.226 (+24.226)
- avg # pairs: 1951.2
- avg # rank err: 985.4
- avg # margin viol: 951
- non0 feature count: 12
- avg list sz: 100
- avg f count: 11.224
-(time 0.45 min, 5.4 s/S)
-
-Writing weights file to 'work/weights.1.1' ...
-done
-
----
-Best iteration: 1 [SCORE 'stupid_bleu'=0.13169].
-This took 0.45 min.
diff --git a/training/dtrain/examples/parallelized/work/shard.0.0.in b/training/dtrain/examples/parallelized/work/shard.0.0.in
deleted file mode 100644
index 92f9c78e..00000000
--- a/training/dtrain/examples/parallelized/work/shard.0.0.in
+++ /dev/null
@@ -1,5 +0,0 @@
-europas nach rassen geteiltes haus
-ein gemeinsames merkmal aller extremen rechten in europa ist ihr rassismus und die tatsache , daß sie das einwanderungsproblem als politischen hebel benutzen .
-der lega nord in italien , der vlaams block in den niederlanden , die anhänger von le pens nationaler front in frankreich , sind beispiele für parteien oder bewegungen , die sich um das gemeinsame thema : ablehnung der zuwanderung gebildet haben und um forderung nach einer vereinfachten politik , um sie zu regeln .
-während individuen wie jörg haidar und jean @-@ marie le pen kommen und ( leider nicht zu bald ) wieder gehen mögen , wird die rassenfrage aus der europäischer politik nicht so bald verschwinden .
-eine alternde einheimische bevölkerung und immer offenere grenzen vermehren die rassistische zersplitterung in den europäischen ländern .
diff --git a/training/dtrain/examples/parallelized/work/shard.0.0.refs b/training/dtrain/examples/parallelized/work/shard.0.0.refs
deleted file mode 100644
index bef68fee..00000000
--- a/training/dtrain/examples/parallelized/work/shard.0.0.refs
+++ /dev/null
@@ -1,5 +0,0 @@
-europe 's divided racial house
-a common feature of europe 's extreme right is its racism and use of the immigration issue as a political wedge .
-the lega nord in italy , the vlaams blok in the netherlands , the supporters of le pen 's national front in france , are all examples of parties or movements formed on the common theme of aversion to immigrants and promotion of simplistic policies to control them .
-while individuals like jorg haidar and jean @-@ marie le pen may come and ( never to soon ) go , the race question will not disappear from european politics anytime soon .
-an aging population at home and ever more open borders imply increasing racial fragmentation in european countries .
diff --git a/training/dtrain/examples/parallelized/work/shard.1.0.in b/training/dtrain/examples/parallelized/work/shard.1.0.in
deleted file mode 100644
index b7695ce7..00000000
--- a/training/dtrain/examples/parallelized/work/shard.1.0.in
+++ /dev/null
@@ -1,5 +0,0 @@
-die großen parteien der rechten und der linken mitte haben sich dem problem gestellt , in dem sie den kopf in den sand gesteckt und allen aussichten zuwider gehofft haben , es möge bald verschwinden .
-das aber wird es nicht , wie die geschichte des rassismus in amerika deutlich zeigt .
-die beziehungen zwischen den rassen standen in den usa über jahrzehnte - und tun das noch heute - im zentrum der politischen debatte . das ging so weit , daß rassentrennung genauso wichtig wie das einkommen wurde , - wenn nicht sogar noch wichtiger - um politische zuneigungen und einstellungen zu bestimmen .
-der erste schritt , um mit der rassenfrage umzugehen ist , ursache und folgen rassistischer feindseligkeiten zu verstehen , auch dann , wenn das bedeutet , unangenehme tatsachen aufzudecken .
-genau das haben in den usa eine große anzahl an forschungsvorhaben in wirtschaft , soziologie , psychologie und politikwissenschaft geleistet . diese forschungen zeigten , daß menschen unterschiedlicher rasse einander deutlich weniger vertrauen .
diff --git a/training/dtrain/examples/parallelized/work/shard.1.0.refs b/training/dtrain/examples/parallelized/work/shard.1.0.refs
deleted file mode 100644
index 6076f6d5..00000000
--- a/training/dtrain/examples/parallelized/work/shard.1.0.refs
+++ /dev/null
@@ -1,5 +0,0 @@
-mainstream parties of the center left and center right have confronted this prospect by hiding their heads in the ground , hoping against hope that the problem will disappear .
-it will not , as america 's racial history clearly shows .
-race relations in the us have been for decades - and remain - at the center of political debate , to the point that racial cleavages are as important as income , if not more , as determinants of political preferences and attitudes .
-the first step to address racial politics is to understand the origin and consequences of racial animosity , even if it means uncovering unpleasant truths .
-this is precisely what a large amount of research in economics , sociology , psychology and political science has done for the us .
diff --git a/training/dtrain/examples/parallelized/work/weights.0 b/training/dtrain/examples/parallelized/work/weights.0
deleted file mode 100644
index ddd595a8..00000000
--- a/training/dtrain/examples/parallelized/work/weights.0
+++ /dev/null
@@ -1,12 +0,0 @@
-LanguageModel 0.7004298992212881
-PhraseModel_2 0.5576194336478857
-PhraseModel_1 0.41787318415343155
-PhraseModel_4 -0.46728502545635164
-PhraseModel_3 -0.029839521598455515
-Glue -0.05760000000000068
-PhraseModel_6 -0.2716499999999978
-PhraseModel_0 -0.20831031065605327
-LanguageModel_OOV -0.15205000000000077
-PassThrough -0.1846500000000006
-WordPenalty 0.09636994553433414
-PhraseModel_5 -0.026900000000000257
diff --git a/training/dtrain/examples/parallelized/work/weights.0.0 b/training/dtrain/examples/parallelized/work/weights.0.0
deleted file mode 100644
index c9370b18..00000000
--- a/training/dtrain/examples/parallelized/work/weights.0.0
+++ /dev/null
@@ -1,12 +0,0 @@
-WordPenalty -0.0079041595706392243
-LanguageModel 0.44781580828279532
-LanguageModel_OOV -0.04010000000000042
-Glue 0.26629999999999948
-PhraseModel_0 -0.19299677809125185
-PhraseModel_1 0.71321026861732773
-PhraseModel_2 0.85195540993310537
-PhraseModel_3 -0.43986310822842656
-PhraseModel_4 -0.44802855630415955
-PhraseModel_5 -0.053800000000000514
-PhraseModel_6 -0.17879999999999835
-PassThrough -0.14770000000000036
diff --git a/training/dtrain/examples/parallelized/work/weights.0.1 b/training/dtrain/examples/parallelized/work/weights.0.1
deleted file mode 100644
index 8fad3de8..00000000
--- a/training/dtrain/examples/parallelized/work/weights.0.1
+++ /dev/null
@@ -1,12 +0,0 @@
-WordPenalty 0.080605055841244472
-LanguageModel -0.026571720531022844
-LanguageModel_OOV -0.30024999999999141
-Glue -0.26989999999999842
-PhraseModel_2 0.92000295209089566
-PhraseModel_1 0.67450748692470841
-PhraseModel_4 -0.5920000014976784
-PhraseModel_3 -0.36402437203127397
-PhraseModel_6 -0.28754999999999603
-PhraseModel_0 -0.32076244202907672
-PassThrough -0.33284999999999004
-PhraseModel_5 -0.026900000000000257
diff --git a/training/dtrain/examples/parallelized/work/weights.1 b/training/dtrain/examples/parallelized/work/weights.1
deleted file mode 100644
index 03058a16..00000000
--- a/training/dtrain/examples/parallelized/work/weights.1
+++ /dev/null
@@ -1,12 +0,0 @@
-PhraseModel_2 0.8365578543552836
-PhraseModel_4 -0.5900840266009169
-PhraseModel_1 0.5312000609786991
-PhraseModel_0 -0.3872342271319619
-PhraseModel_3 -0.3728279676912084
-Glue -0.2938500000000036
-PhraseModel_6 -0.2803499999999967
-PassThrough -0.25014999999999626
-LanguageModel_OOV -0.21754999999999702
-LanguageModel 0.07306061161169894
-WordPenalty 0.09576193325966899
-PhraseModel_5 -0.026900000000000257
diff --git a/training/dtrain/examples/parallelized/work/weights.1.0 b/training/dtrain/examples/parallelized/work/weights.1.0
deleted file mode 100644
index 6a6a65c1..00000000
--- a/training/dtrain/examples/parallelized/work/weights.1.0
+++ /dev/null
@@ -1,11 +0,0 @@
-WordPenalty 0.20064405063930751
-LanguageModel 0.9530439901597807
-LanguageModel_OOV -0.26400000000000112
-Glue -0.38150000000000084
-PhraseModel_0 -0.22362384322085468
-PhraseModel_1 0.12253609968953538
-PhraseModel_2 0.26328345736266612
-PhraseModel_3 0.38018406503151553
-PhraseModel_4 -0.48654149460854373
-PhraseModel_6 -0.36449999999999722
-PassThrough -0.22160000000000085
diff --git a/training/dtrain/examples/parallelized/work/weights.1.1 b/training/dtrain/examples/parallelized/work/weights.1.1
deleted file mode 100644
index f56ea4a2..00000000
--- a/training/dtrain/examples/parallelized/work/weights.1.1
+++ /dev/null
@@ -1,12 +0,0 @@
-WordPenalty 0.1109188106780935
-LanguageModel 0.17269294375442074
-LanguageModel_OOV -0.13485000000000266
-Glue -0.3178000000000088
-PhraseModel_2 0.75311275661967159
-PhraseModel_1 0.38789263503268989
-PhraseModel_4 -0.58816805170415531
-PhraseModel_3 -0.38163156335114284
-PhraseModel_6 -0.27314999999999739
-PhraseModel_0 -0.45370601223484697
-PassThrough -0.16745000000000249
-PhraseModel_5 -0.026900000000000257
diff --git a/training/dtrain/examples/standard/README b/training/dtrain/examples/standard/README
deleted file mode 100644
index ce37d31a..00000000
--- a/training/dtrain/examples/standard/README
+++ /dev/null
@@ -1,2 +0,0 @@
-Call `dtrain` from this folder with ../../dtrain -c dtrain.ini .
-
diff --git a/training/dtrain/examples/standard/cdec.ini b/training/dtrain/examples/standard/cdec.ini
deleted file mode 100644
index 3330dd71..00000000
--- a/training/dtrain/examples/standard/cdec.ini
+++ /dev/null
@@ -1,27 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
-scfg_max_span_limit=15
-intersection_strategy=cube_pruning
-cubepruning_pop_limit=200
-grammar=nc-wmt11.grammar.gz
-feature_function=WordPenalty
-feature_function=KLanguageModel ./nc-wmt11.en.srilm.gz
-# all currently working feature functions for translation:
-# (with those features active that were used in the ACL paper)
-#feature_function=ArityPenalty
-#feature_function=CMR2008ReorderingFeatures
-#feature_function=Dwarf
-#feature_function=InputIndicator
-#feature_function=LexNullJump
-#feature_function=NewJump
-#feature_function=NgramFeatures
-#feature_function=NonLatinCount
-#feature_function=OutputIndicator
-feature_function=RuleIdentityFeatures
-feature_function=RuleSourceBigramFeatures
-feature_function=RuleTargetBigramFeatures
-feature_function=RuleShape
-feature_function=LexicalFeatures 1 1 1
-#feature_function=SourceSpanSizeFeatures
-#feature_function=SourceWordPenalty
-#feature_function=SpanFeatures
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
deleted file mode 100644
index a515db02..00000000
--- a/training/dtrain/examples/standard/dtrain.ini
+++ /dev/null
@@ -1,27 +0,0 @@
-#input=./nc-wmt11.de.gz
-#refs=./nc-wmt11.en.gz
-bitext=./nc-wmt11.gz
-output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
-select_weights=avg # output average (over epochs) weight vector
-decoder_config=./cdec.ini # config for cdec
-# weights for these features will be printed on each iteration
-print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-# newer version of the grammar extractor use different feature names:
-#print_weights= EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV
-stop_after=10 # stop epoch after 10 inputs
-
-# interesting stuff
-epochs=3 # run over input 3 times
-k=100 # use 100best lists
-N=4 # optimize (approx) BLEU4
-scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=0.1 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
-gamma=0 # use SVM reg
-sample_from=kbest # use kbest lists (as opposed to forest)
-filter=uniq # only unique entries in kbest (surface form)
-pair_sampling=XYX #
-hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
-pair_threshold=0 # minimum distance in BLEU (here: > 0)
-loss_margin=0 # update if correctly ranked, but within this margin
-repeat=1 # repeat training on a kbest list 1 times
-#batch=true # batch tuning, update after accumulating over all sentences and all kbest lists
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
deleted file mode 100644
index 2460cfbb..00000000
--- a/training/dtrain/examples/standard/expected-output
+++ /dev/null
@@ -1,123 +0,0 @@
- cdec cfg './cdec.ini'
-Loading the LM will be faster if you build a binary file.
-Reading ./nc-wmt11.en.srilm.gz
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
- Example feature: Shape_S00000_T00000
-T=1 I=1 D=1
-Seeding random number sequence to 2327685089
-
-dtrain
-Parameters:
- k 100
- N 4
- T 3
- batch 0
- scorer 'fixed_stupid_bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 0.1
- gamma 0
- loss margin 0
- faster perceptron 1
- pairs 'XYX'
- hi lo 0.1
- pair threshold 0
- select weights 'avg'
- l1 reg 0 'none'
- pclr no
- max pairs 4294967295
- repeat 1
- cdec cfg './cdec.ini'
- input './nc-wmt11.gz'
- output '-'
- stop_after 10
-(a dot represents 10 inputs)
-Iteration #1 of 3.
- . 10
-Stopping after 10 input sentences.
-WEIGHTS
- Glue = +6.9
- WordPenalty = -46.426
- LanguageModel = +535.12
- LanguageModel_OOV = -123.5
- PhraseModel_0 = -160.73
- PhraseModel_1 = -350.13
- PhraseModel_2 = -187.81
- PhraseModel_3 = +172.04
- PhraseModel_4 = +0.90108
- PhraseModel_5 = +21.6
- PhraseModel_6 = +67.2
- PassThrough = -149.7
- ---
- 1best avg score: 0.23327 (+0.23327)
- 1best avg model score: -9084.9 (-9084.9)
- avg # pairs: 780.7
- avg # rank err: 0 (meaningless)
- avg # margin viol: 0
- k-best loss imp: 100%
- non0 feature count: 1389
- avg list sz: 91.3
- avg f count: 146.2
-(time 0.37 min, 2.2 s/S)
-
-Iteration #2 of 3.
- . 10
-WEIGHTS
- Glue = -43
- WordPenalty = -22.019
- LanguageModel = +591.53
- LanguageModel_OOV = -252.1
- PhraseModel_0 = -120.21
- PhraseModel_1 = -43.589
- PhraseModel_2 = +73.53
- PhraseModel_3 = +113.7
- PhraseModel_4 = -223.81
- PhraseModel_5 = +64
- PhraseModel_6 = +54.8
- PassThrough = -331.1
- ---
- 1best avg score: 0.29568 (+0.062413)
- 1best avg model score: -15879 (-6794.1)
- avg # pairs: 566.1
- avg # rank err: 0 (meaningless)
- avg # margin viol: 0
- k-best loss imp: 100%
- non0 feature count: 1931
- avg list sz: 91.3
- avg f count: 139.89
-(time 0.33 min, 2 s/S)
-
-Iteration #3 of 3.
- . 10
-WEIGHTS
- Glue = -44.3
- WordPenalty = -131.85
- LanguageModel = +230.91
- LanguageModel_OOV = -285.4
- PhraseModel_0 = -194.27
- PhraseModel_1 = -294.83
- PhraseModel_2 = -92.043
- PhraseModel_3 = -140.24
- PhraseModel_4 = +85.613
- PhraseModel_5 = +238.1
- PhraseModel_6 = +158.7
- PassThrough = -359.6
- ---
- 1best avg score: 0.37375 (+0.078067)
- 1best avg model score: -14519 (+1359.7)
- avg # pairs: 545.4
- avg # rank err: 0 (meaningless)
- avg # margin viol: 0
- k-best loss imp: 100%
- non0 feature count: 2218
- avg list sz: 91.3
- avg f count: 137.77
-(time 0.35 min, 2.1 s/S)
-
-Writing weights file to '-' ...
-done
-
----
-Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.37375].
-This took 1.05 min.
diff --git a/training/dtrain/examples/standard/nc-wmt11.de.gz b/training/dtrain/examples/standard/nc-wmt11.de.gz
deleted file mode 100644
index 0741fd92..00000000
Binary files a/training/dtrain/examples/standard/nc-wmt11.de.gz and /dev/null differ
diff --git a/training/dtrain/examples/standard/nc-wmt11.en.gz b/training/dtrain/examples/standard/nc-wmt11.en.gz
deleted file mode 100644
index 1c0bd401..00000000
Binary files a/training/dtrain/examples/standard/nc-wmt11.en.gz and /dev/null differ
diff --git a/training/dtrain/examples/standard/nc-wmt11.en.srilm.gz b/training/dtrain/examples/standard/nc-wmt11.en.srilm.gz
deleted file mode 100644
index 7ce81057..00000000
Binary files a/training/dtrain/examples/standard/nc-wmt11.en.srilm.gz and /dev/null differ
diff --git a/training/dtrain/examples/standard/nc-wmt11.grammar.gz b/training/dtrain/examples/standard/nc-wmt11.grammar.gz
deleted file mode 100644
index ce4024a1..00000000
Binary files a/training/dtrain/examples/standard/nc-wmt11.grammar.gz and /dev/null differ
diff --git a/training/dtrain/examples/standard/nc-wmt11.gz b/training/dtrain/examples/standard/nc-wmt11.gz
deleted file mode 100644
index c39c5aef..00000000
Binary files a/training/dtrain/examples/standard/nc-wmt11.gz and /dev/null differ
diff --git a/training/dtrain/examples/toy/cdec.ini b/training/dtrain/examples/toy/cdec.ini
deleted file mode 100644
index e6c19abe..00000000
--- a/training/dtrain/examples/toy/cdec.ini
+++ /dev/null
@@ -1,4 +0,0 @@
-formalism=scfg
-add_pass_through_rules=true
-grammar=grammar.gz
-#add_extra_pass_through_features=6
diff --git a/training/dtrain/examples/toy/dtrain.ini b/training/dtrain/examples/toy/dtrain.ini
deleted file mode 100644
index ef956df7..00000000
--- a/training/dtrain/examples/toy/dtrain.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-decoder_config=cdec.ini
-input=src
-refs=tgt
-output=-
-print_weights=logp shell_rule house_rule small_rule little_rule PassThrough PassThrough_1 PassThrough_2 PassThrough_3 PassThrough_4 PassThrough_5 PassThrough_6
-k=4
-N=4
-epochs=2
-scorer=bleu
-sample_from=kbest
-filter=uniq
-pair_sampling=all
-learning_rate=1
diff --git a/training/dtrain/examples/toy/expected-output b/training/dtrain/examples/toy/expected-output
deleted file mode 100644
index 1da2aadd..00000000
--- a/training/dtrain/examples/toy/expected-output
+++ /dev/null
@@ -1,77 +0,0 @@
-Warning: hi_lo only works with pair_sampling XYX.
- cdec cfg 'cdec.ini'
-Seeding random number sequence to 1664825829
-
-dtrain
-Parameters:
- k 4
- N 4
- T 2
- scorer 'bleu'
- sample from 'kbest'
- filter 'uniq'
- learning rate 1
- gamma 0
- loss margin 0
- pairs 'all'
- pair threshold 0
- select weights 'last'
- l1 reg 0 'none'
- max pairs 4294967295
- cdec cfg 'cdec.ini'
- input 'src'
- refs 'tgt'
- output '-'
-(a dot represents 10 inputs)
-Iteration #1 of 2.
- 2
-WEIGHTS
- logp = +0
- shell_rule = -1
- house_rule = +2
- small_rule = -2
- little_rule = +3
- PassThrough = -5
- ---
- 1best avg score: 0.5 (+0.5)
- 1best avg model score: 2.5 (+2.5)
- avg # pairs: 4
- avg # rank err: 1.5
- avg # margin viol: 0
- non0 feature count: 6
- avg list sz: 4
- avg f count: 2.875
-(time 0 min, 0 s/S)
-
-Iteration #2 of 2.
- 2
-WEIGHTS
- logp = +0
- shell_rule = -1
- house_rule = +2
- small_rule = -2
- little_rule = +3
- PassThrough = -5
- ---
- 1best avg score: 1 (+0.5)
- 1best avg model score: 5 (+2.5)
- avg # pairs: 5
- avg # rank err: 0
- avg # margin viol: 0
- non0 feature count: 6
- avg list sz: 4
- avg f count: 3
-(time 0 min, 0 s/S)
-
-Writing weights file to '-' ...
-house_rule 2
-little_rule 3
-Glue -4
-PassThrough -5
-small_rule -2
-shell_rule -1
-done
-
----
-Best iteration: 2 [SCORE 'bleu'=1].
-This took 0 min.
diff --git a/training/dtrain/examples/toy/grammar.gz b/training/dtrain/examples/toy/grammar.gz
deleted file mode 100644
index 8eb0d29e..00000000
Binary files a/training/dtrain/examples/toy/grammar.gz and /dev/null differ
diff --git a/training/dtrain/examples/toy/src b/training/dtrain/examples/toy/src
deleted file mode 100644
index 87e39ef2..00000000
--- a/training/dtrain/examples/toy/src
+++ /dev/null
@@ -1,2 +0,0 @@
-ich sah ein kleines haus
-ich fand ein kleines haus
diff --git a/training/dtrain/examples/toy/tgt b/training/dtrain/examples/toy/tgt
deleted file mode 100644
index 174926b3..00000000
--- a/training/dtrain/examples/toy/tgt
+++ /dev/null
@@ -1,2 +0,0 @@
-i saw a little house
-i found a little house
diff --git a/training/dtrain/kbestget.h b/training/dtrain/kbestget.h
deleted file mode 100644
index 85252db3..00000000
--- a/training/dtrain/kbestget.h
+++ /dev/null
@@ -1,88 +0,0 @@
-#ifndef _DTRAIN_KBESTGET_H_
-#define _DTRAIN_KBESTGET_H_
-
-#include "kbest.h"
-
-namespace dtrain
-{
-
-
-struct KBestGetter : public HypSampler
-{
-  const unsigned k_;
-  const string filter_type_;
-  vector<ScoredHyp> s_;
-  unsigned src_len_;
-
-  KBestGetter(const unsigned k, const string filter_type) :
-    k_(k), filter_type_(filter_type) {}
-
-  virtual void
-  NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg)
-  {
-    src_len_ = smeta.GetSourceLength();
-    KBestScored(*hg);
-  }
-
-  vector<ScoredHyp>* GetSamples() { return &s_; }
-
-  void
-  KBestScored(const Hypergraph& forest)
-  {
-    if (filter_type_ == "uniq") {
-      KBestUnique(forest);
-    } else if (filter_type_ == "not") {
-      KBestNoFilter(forest);
-    }
-  }
-
-  void
-  KBestUnique(const Hypergraph& forest)
-  {
-    s_.clear(); sz_ = f_count_ = 0;
-    KBest::KBestDerivations<vector<WordID>, ESentenceTraversal,
-      KBest::FilterUnique, prob_t, EdgeProb> kbest(forest, k_);
-    for (unsigned i = 0; i < k_; ++i) {
-      const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal, KBest::FilterUnique,
-        prob_t, EdgeProb>::Derivation* d =
-          kbest.LazyKthBest(forest.nodes_.size() - 1, i);
-      if (!d) break;
-      ScoredHyp h;
-      h.w = d->yield;
-      h.f = d->feature_values;
-      h.model = log(d->score);
-      h.rank = i;
-      h.score = scorer_->Score(h.w, *ref_, i, src_len_);
-      s_.push_back(h);
-      sz_++;
-      f_count_ += h.f.size();
-    }
-  }
-
-  void
-  KBestNoFilter(const Hypergraph& forest)
-  {
-    s_.clear(); sz_ = f_count_ = 0;
-    KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> kbest(forest, k_);
-    for (unsigned i = 0; i < k_; ++i) {
-      const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal>::Derivation* d =
-        kbest.LazyKthBest(forest.nodes_.size() - 1, i);
-      if (!d) break;
-      ScoredHyp h;
-      h.w = d->yield;
-      h.f = d->feature_values;
-      h.model = log(d->score);
-      h.rank = i;
-      h.score = scorer_->Score(h.w, *ref_, i, src_len_);
-      s_.push_back(h);
-      sz_++;
-      f_count_ += h.f.size();
-    }
-  }
-};
-
-
-} // namespace
-
-#endif
-
diff --git a/training/dtrain/ksampler.h b/training/dtrain/ksampler.h
deleted file mode 100644
index 29dab667..00000000
--- a/training/dtrain/ksampler.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _DTRAIN_KSAMPLER_H_
-#define _DTRAIN_KSAMPLER_H_
-
-#include "hg_sampler.h"
-
-namespace dtrain
-{
-
-
-bool
-cmp_hyp_by_model_d(ScoredHyp a, ScoredHyp b)
-{
-  return a.model > b.model;
-}
-
-struct KSampler : public HypSampler
-{
-  const unsigned k_;
-  vector<ScoredHyp> s_;
-  MT19937* prng_;
-  score_t (*scorer)(NgramCounts&, const unsigned, const unsigned, unsigned, vector<score_t>);
-  unsigned src_len_;
-
-  explicit KSampler(const unsigned k, MT19937* prng) :
-    k_(k), prng_(prng) {}
-
-  virtual void
-  NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg)
-  {
-    src_len_ = smeta.GetSourceLength();
-    ScoredSamples(*hg);
-  }
-
-  vector<ScoredHyp>* GetSamples() { return &s_; }
-
-  void ScoredSamples(const Hypergraph& forest) {
-    s_.clear(); sz_ = f_count_ = 0;
-    std::vector<HypergraphSampler::Hypothesis> samples;
-    HypergraphSampler::sample_hypotheses(forest, k_, prng_, &samples);
-    for (unsigned i = 0; i < k_; ++i) {
-      ScoredHyp h;
-      h.w = samples[i].words;
-      h.f = samples[i].fmap;
-      h.model = log(samples[i].model_score);
-      h.rank = i;
-      h.score = scorer_->Score(h.w, *ref_, i, src_len_);
-      s_.push_back(h);
-      sz_++;
-      f_count_ += h.f.size();
-    }
-    sort(s_.begin(), s_.end(), cmp_hyp_by_model_d);
-    for (unsigned i = 0; i < s_.size(); i++) s_[i].rank = i;
-  }
-};
-
-
-} // namespace
-
-#endif
-
diff --git a/training/dtrain/pairsampling.h b/training/dtrain/pairsampling.h
deleted file mode 100644
index 1a3c498c..00000000
--- a/training/dtrain/pairsampling.h
+++ /dev/null
@@ -1,141 +0,0 @@
-#ifndef _DTRAIN_PAIRSAMPLING_H_
-#define _DTRAIN_PAIRSAMPLING_H_
-
-namespace dtrain
-{
-
-
-bool
-accept_pair(score_t a, score_t b, score_t threshold)
-{
-  if (fabs(a - b) < threshold) return false;
-  return true;
-}
-
-bool
-cmp_hyp_by_score_d(ScoredHyp a, ScoredHyp b)
-{
-  return a.score > b.score;
-}
-
-inline void
-all_pairs(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, bool misranked_only, float _unused=1)
-{
-  sort(s->begin(), s->end(), cmp_hyp_by_score_d);
-  unsigned sz = s->size();
-  bool b = false;
-  unsigned count = 0;
-  for (unsigned i = 0; i < sz-1; i++) {
-    for (unsigned j = i+1; j < sz; j++) {
-      if (misranked_only && !((*s)[i].model <= (*s)[j].model)) continue;
-      if (threshold > 0) {
-        if (accept_pair((*s)[i].score, (*s)[j].score, threshold))
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      } else {
-        if ((*s)[i].score != (*s)[j].score)
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      }
-      if (++count == max) {
-        b = true;
-        break;
-      }
-    }
-    if (b) break;
-  }
-}
-
-/*
- * multipartite ranking
- *  sort (descending) by bleu
- *  compare top X to middle Y and low X
- *  cmp middle Y to low X
- */
-
-inline void
-partXYX(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, bool misranked_only, float hi_lo)
-{
-  unsigned sz = s->size();
-  if (sz < 2) return;
-  sort(s->begin(), s->end(), cmp_hyp_by_score_d);
-  unsigned sep = round(sz*hi_lo);
-  unsigned sep_hi = sep;
-  if (sz > 4) while (sep_hi < sz && (*s)[sep_hi-1].score == (*s)[sep_hi].score) ++sep_hi;
-  else sep_hi = 1;
-  bool b = false;
-  unsigned count = 0;
-  for (unsigned i = 0; i < sep_hi; i++) {
-    for (unsigned j = sep_hi; j < sz; j++) {
-      if (misranked_only && !((*s)[i].model <= (*s)[j].model)) continue;
-      if (threshold > 0) {
-        if (accept_pair((*s)[i].score, (*s)[j].score, threshold))
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      } else {
-        if ((*s)[i].score != (*s)[j].score)
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      }
-      if (++count == max) {
-        b = true;
-        break;
-      }
-    }
-    if (b) break;
-  }
-  unsigned sep_lo = sz-sep;
-  while (sep_lo > 0 && (*s)[sep_lo-1].score == (*s)[sep_lo].score) --sep_lo;
-  for (unsigned i = sep_hi; i < sz-sep_lo; i++) {
-    for (unsigned j = sz-sep_lo; j < sz; j++) {
-      if (misranked_only && !((*s)[i].model <= (*s)[j].model)) continue;
-      if (threshold > 0) {
-        if (accept_pair((*s)[i].score, (*s)[j].score, threshold))
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      } else {
-        if ((*s)[i].score != (*s)[j].score)
-          training.push_back(make_pair((*s)[i], (*s)[j]));
-      }
-      if (++count == max) return;
-    }
-  }
-}
-
-/*
- * pair sampling as in
- * 'Tuning as Ranking' (Hopkins & May, 2011)
- *  count = 5000
- *  threshold = 5% BLEU (0.05 for param 3)
- *  cut = top 50
- */
-bool
-_PRO_cmp_pair_by_diff_d(pair<ScoredHyp,ScoredHyp> a, pair<ScoredHyp,ScoredHyp> b)
-{
-  return (fabs(a.first.score - a.second.score)) > (fabs(b.first.score - b.second.score));
-}
-inline void
-PROsampling(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, bool _unused=false, float _also_unused=0)
-{
-  sort(s->begin(), s->end(), cmp_hyp_by_score_d);
-  unsigned max_count = 5000, count = 0, sz = s->size();
-  bool b = false;
-  for (unsigned i = 0; i < sz-1; i++) {
-    for (unsigned j = i+1; j < sz; j++) {
-      if (accept_pair((*s)[i].score, (*s)[j].score, threshold)) {
-        training.push_back(make_pair((*s)[i], (*s)[j]));
-        if (++count == max_count) {
-          b = true;
-          break;
-        }
-      }
-    }
-    if (b) break;
-  }
-  if (training.size() > 50) {
-    sort(training.begin(), training.end(), _PRO_cmp_pair_by_diff_d);
-    training.erase(training.begin()+50, training.end());
-  }
-  return;
-}
-
-
-} // namespace
-
-#endif
-
diff --git a/training/dtrain/score.cc b/training/dtrain/score.cc
deleted file mode 100644
index 127f34d2..00000000
--- a/training/dtrain/score.cc
+++ /dev/null
@@ -1,283 +0,0 @@
-#include "score.h"
-
-namespace dtrain
-{
-
-
-/*
- * bleu
- *
- * as in "BLEU: a Method for Automatic Evaluation
- *        of Machine Translation"
- * (Papineni et al. '02)
- *
- * NOTE: 0 if for one n \in {1..N} count is 0
- */
-score_t
-BleuScorer::Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len)
-{
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  unsigned M = N_;
-  vector<score_t> v = w_;
-  if (ref_len < N_) {
-    M = ref_len;
-    for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
-  }
-  score_t sum = 0;
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) return 0.;
-    sum += v[i] * log((score_t)counts.clipped_[i]/counts.sum_[i]);
-  }
-  return brevity_penalty(hyp_len, ref_len) * exp(sum);
-}
-
-score_t
-BleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
-                  const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  return Bleu(counts, hyp_len, ref_len);
-}
-
-/*
- * 'stupid' bleu
- *
- * as in "ORANGE: a Method for Evaluating
- *        Automatic Evaluation Metrics
- *        for Machine Translation"
- * (Lin & Och '04)
- *
- * NOTE: 0 iff no 1gram match ('grounded')
- */
-score_t
-StupidBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
-                        const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  unsigned M = N_;
-  vector<score_t> v = w_;
-  if (ref_len < N_) {
-    M = ref_len;
-    for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
-  }
-  score_t sum = 0, add = 0;
-  for (unsigned i = 0; i < M; i++) {
-    if (i == 0 && (counts.sum_[i] == 0 || counts.clipped_[i] == 0)) return 0.;
-    if (i == 1) add = 1;
-    sum += v[i] * log(((score_t)counts.clipped_[i] + add)/((counts.sum_[i] + add)));
-  }
-  return brevity_penalty(hyp_len, ref_len) * exp(sum);
-}
-
-/*
- * fixed 'stupid' bleu
- *
- * as in "Optimizing for Sentence-Level BLEU+1
- *        Yields Short Translations"
- * (Nakov et al. '12)
- */
-score_t
-FixedStupidBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
-                             const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  unsigned M = N_;
-  vector<score_t> v = w_;
-  if (ref_len < N_) {
-    M = ref_len;
-    for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
-  }
-  score_t sum = 0, add = 0;
-  for (unsigned i = 0; i < M; i++) {
-    if (i == 0 && (counts.sum_[i] == 0 || counts.clipped_[i] == 0)) return 0.;
-    if (i == 1) add = 1;
-    sum += v[i] * log(((score_t)counts.clipped_[i] + add)/((counts.sum_[i] + add)));
-  }
-  return brevity_penalty(hyp_len, ref_len+1) * exp(sum); // <- fix
-}
-
-/*
- * smooth bleu
- *
- * as in "An End-to-End Discriminative Approach
- *        to Machine Translation"
- * (Liang et al. '06)
- *
- * NOTE: max is 0.9375 (with N=4)
- */
-score_t
-SmoothBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
-                        const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  unsigned M = N_;
-  if (ref_len < N_) M = ref_len;
-  score_t sum = 0.;
-  vector<score_t> i_bleu;
-  for (unsigned i = 0; i < M; i++) i_bleu.push_back(0.);
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) {
-      break;
-    } else {
-      score_t i_ng = log((score_t)counts.clipped_[i]/counts.sum_[i]);
-      for (unsigned j = i; j < M; j++) {
-        i_bleu[j] += (1/((score_t)j+1)) * i_ng;
-      }
-    }
-    sum += exp(i_bleu[i])/pow(2.0, (double)(N_-i));
-  }
-  return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * 'sum' bleu
- *
- * sum up Ngram precisions
- */
-score_t
-SumBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
-                     const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  unsigned M = N_;
-  if (ref_len < N_) M = ref_len;
-  score_t sum = 0.;
-  unsigned j = 1;
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break;
-    sum += ((score_t)counts.clipped_[i]/counts.sum_[i])/pow(2.0, (double) (N_-j+1));
-    j++;
-  }
-  return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * 'sum' (exp) bleu
- *
- * sum up exp(Ngram precisions)
- */
-score_t
-SumExpBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
-                        const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  unsigned M = N_;
-  if (ref_len < N_) M = ref_len;
-  score_t sum = 0.;
-  unsigned j = 1;
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break;
-    sum += exp(((score_t)counts.clipped_[i]/counts.sum_[i]))/pow(2.0, (double) (N_-j+1));
-    j++;
-  }
-  return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * 'sum' (whatever) bleu
- *
- * sum up exp(weight * log(Ngram precisions))
- */
-score_t
-SumWhateverBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
-                             const unsigned /*rank*/, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (hyp_len == 0 || ref_len == 0) return 0.;
-  NgramCounts counts = make_ngram_counts(hyp, ref, N_);
-  unsigned M = N_;
-  vector<score_t> v = w_;
-  if (ref_len < N_) {
-    M = ref_len;
-    for (unsigned i = 0; i < M; i++) v[i] = 1/((score_t)M);
-  }
-  score_t sum = 0.;
-  unsigned j = 1;
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || counts.clipped_[i] == 0) break;
-    sum += exp(v[i] * log(((score_t)counts.clipped_[i]/counts.sum_[i])))/pow(2.0, (double) (N_-j+1));
-    j++;
-  }
-  return brevity_penalty(hyp_len, ref_len) * sum;
-}
-
-/*
- * approx. bleu
- *
- * as in "Online Large-Margin Training of Syntactic
- *        and Structural Translation Features"
- * (Chiang et al. '08)
- *
- * NOTE: Needs some more code in dtrain.cc .
- *       No scaling by src len.
- */
-score_t
-ApproxBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
-                        const unsigned rank, const unsigned src_len)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (ref_len == 0) return 0.;
-  score_t score = 0.;
-  NgramCounts counts(N_);
-  if (hyp_len > 0) {
-    counts = make_ngram_counts(hyp, ref, N_);
-    NgramCounts tmp = glob_onebest_counts_ + counts;
-    score = Bleu(tmp, hyp_len, ref_len);
-  }
-  if (rank == 0) { // 'context of 1best translations'
-    glob_onebest_counts_ += counts;
-    glob_onebest_counts_ *= discount_;
-    glob_hyp_len_ = discount_ * (glob_hyp_len_ + hyp_len);
-    glob_ref_len_ = discount_ * (glob_ref_len_ + ref_len);
-    glob_src_len_ = discount_ * (glob_src_len_ + src_len);
-  }
-  return score;
-}
-
-/*
- * Linear (Corpus) Bleu
- *
- * as in "Lattice Minimum Bayes-Risk Decoding
- *        for Statistical Machine Translation"
- * (Tromble et al. '08)
- *
- */
-score_t
-LinearBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
-                        const unsigned rank, const unsigned /*src_len*/)
-{
-  unsigned hyp_len = hyp.size(), ref_len = ref.size();
-  if (ref_len == 0) return 0.;
-  unsigned M = N_;
-  if (ref_len < N_) M = ref_len;
-  NgramCounts counts(M);
-  if (hyp_len > 0)
-    counts = make_ngram_counts(hyp, ref, M);
-  score_t ret = 0.;
-  for (unsigned i = 0; i < M; i++) {
-    if (counts.sum_[i] == 0 || onebest_counts_.sum_[i] == 0) break;
-    ret += counts.sum_[i]/onebest_counts_.sum_[i];
-  }
-  ret = -(hyp_len/(score_t)onebest_len_) + (1./M) * ret;
-  if (rank == 0) {
-    onebest_len_ += hyp_len;
-    onebest_counts_ += counts;
-  }
-  return ret;
-}
-
-
-} // namespace
--
cgit v1.2.3