From fde1df3ee578564f78d7d0eac453dcb3f1740e05 Mon Sep 17 00:00:00 2001
From: Patrick Simianer <p@simianer.de>
Date: Thu, 26 Apr 2012 11:36:49 +0200
Subject: remove obsolete stuff

---
 dtrain/test/example/cdec.ini   |  4 ++--
 dtrain/test/example/dtrain.ini | 12 ++++++------
 2 files changed, 8 insertions(+), 8 deletions(-)

(limited to 'dtrain/test/example')

diff --git a/dtrain/test/example/cdec.ini b/dtrain/test/example/cdec.ini
index fe5ca759..6642107f 100644
--- a/dtrain/test/example/cdec.ini
+++ b/dtrain/test/example/cdec.ini
@@ -5,7 +5,8 @@ intersection_strategy=cube_pruning
 cubepruning_pop_limit=30
 feature_function=WordPenalty
 feature_function=KLanguageModel test/example/nc-wmt11.en.srilm.gz
-# all currently working feature function for translation:
+# all currently working feature functions for translation:
+# (with those features active that were used in the ACL paper)
 #feature_function=ArityPenalty
 #feature_function=CMR2008ReorderingFeatures
 #feature_function=Dwarf
@@ -21,4 +22,3 @@ feature_function=RuleShape
 #feature_function=SourceSpanSizeFeatures
 #feature_function=SourceWordPenalty
 #feature_function=SpanFeatures
-# ^^^ features active that were used in the ACL paper
diff --git a/dtrain/test/example/dtrain.ini b/dtrain/test/example/dtrain.ini
index 66be6bf2..b59250f3 100644
--- a/dtrain/test/example/dtrain.ini
+++ b/dtrain/test/example/dtrain.ini
@@ -1,18 +1,18 @@
-input=test/example/nc-wmt11.1k.gz # use '-' for stdin
+input=test/example/nc-wmt11.1k.gz # use '-' for STDIN
 output=weights.gz # a weights file (add .gz for gzip compression) or STDOUT '-'
 decoder_config=test/example/cdec.ini # config for cdec
 # weights for these features will be printed on each iteration
 print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
 tmp=/tmp
-stop_after=100 # stop epoch after 10 inputs
+stop_after=100 # stop epoch after 100 inputs
 # interesting stuff
-epochs=100 # run over input 3 times
+epochs=3 # run over input 3 times
 k=100 # use 100best lists
-N=4 # optimize (approx) BLEU4
+N=4 # optimize (approx) BLEU4
+scorer=stupid_bleu # use 'stupid' BLEU+1
 learning_rate=0.0001 # learning rate
-gamma=0 # use SVM reg
-scorer=smooth_bleu # use smooth BLEU of (Liang et al. '06)
+gamma=0 # use SVM reg
 sample_from=kbest # use kbest lists (as opposed to forest)
 filter=uniq # only unique entries in kbest (surface form)
 pair_sampling=108010 # 10 vs 80 vs 10 and 80 vs 10
-- 
cgit v1.2.3