summary | refs | log | tree | commit | diff
path: root/dtrain/test/example
diff options
context:
space:
mode:
author: Patrick Simianer <simianer@cl.uni-heidelberg.de> 2012-04-26 11:36:49 +0200
committer: Patrick Simianer <simianer@cl.uni-heidelberg.de> 2012-04-26 11:36:49 +0200
commit: 28806638345e60bd442bf5fa8e7471f9115b0296 (patch)
tree: 1c304a800ac46f7d0f42ffdfb227571c80b56700 /dtrain/test/example
parent: 273abce33542fb991e7188c14cd6419870cdb2bd (diff)
remove obsolete stuff
Diffstat (limited to 'dtrain/test/example')
-rw-r--r-- dtrain/test/example/cdec.ini | 4
-rw-r--r-- dtrain/test/example/dtrain.ini | 12
2 files changed, 8 insertions, 8 deletions
diff --git a/dtrain/test/example/cdec.ini b/dtrain/test/example/cdec.ini
index fe5ca759..6642107f 100644
--- a/dtrain/test/example/cdec.ini
+++ b/dtrain/test/example/cdec.ini
@@ -5,7 +5,8 @@ intersection_strategy=cube_pruning
cubepruning_pop_limit=30
feature_function=WordPenalty
feature_function=KLanguageModel test/example/nc-wmt11.en.srilm.gz
-# all currently working feature function for translation:
+# all currently working feature functions for translation:
+# (with those features active that were used in the ACL paper)
#feature_function=ArityPenalty
#feature_function=CMR2008ReorderingFeatures
#feature_function=Dwarf
@@ -21,4 +22,3 @@ feature_function=RuleShape
#feature_function=SourceSpanSizeFeatures
#feature_function=SourceWordPenalty
#feature_function=SpanFeatures
-# ^^^ features active that were used in the ACL paper
diff --git a/dtrain/test/example/dtrain.ini b/dtrain/test/example/dtrain.ini
index 66be6bf2..b59250f3 100644
--- a/dtrain/test/example/dtrain.ini
+++ b/dtrain/test/example/dtrain.ini
@@ -1,18 +1,18 @@
-input=test/example/nc-wmt11.1k.gz # use '-' for stdin
+input=test/example/nc-wmt11.1k.gz # use '-' for STDIN
output=weights.gz # a weights file (add .gz for gzip compression) or STDOUT '-'
decoder_config=test/example/cdec.ini # config for cdec
# weights for these features will be printed on each iteration
print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
tmp=/tmp
-stop_after=100 # stop epoch after 10 inputs
+stop_after=100 # stop epoch after 100 inputs
# interesting stuff
-epochs=100 # run over input 3 times
+epochs=3 # run over input 3 times
k=100 # use 100best lists
-N=4 # optimize (approx) BLEU4
+N=4 # optimize (approx) BLEU4
+scorer=stupid_bleu # use 'stupid' BLEU+1
learning_rate=0.0001 # learning rate
-gamma=0 # use SVM reg
-scorer=smooth_bleu # use smooth BLEU of (Liang et al. '06)
+gamma=0 # use SVM reg
sample_from=kbest # use kbest lists (as opposed to forest)
filter=uniq # only unique entries in kbest (surface form)
pair_sampling=108010 # 10 vs 80 vs 10 and 80 vs 10