author     Patrick Simianer <simianer@cl.uni-heidelberg.de>  2012-04-27 01:54:47 +0200
committer  Patrick Simianer <simianer@cl.uni-heidelberg.de>  2012-04-27 01:54:47 +0200
commit     0ac66e310d57f9aea5ddeea900c84df08abfe8c2 (patch)
tree       1d428ccbe1c63c90499e09e89d314f74fff11047 /dtrain/test/example
parent     01110e92e7429df7882879e026b28aa9c89c724d (diff)
fix approx. BLEU of (Chiang et al. '08)
Diffstat (limited to 'dtrain/test/example')
-rw-r--r--  dtrain/test/example/dtrain.ini | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/dtrain/test/example/dtrain.ini b/dtrain/test/example/dtrain.ini
index cd2c75e7..2ad44688 100644
--- a/dtrain/test/example/dtrain.ini
+++ b/dtrain/test/example/dtrain.ini
@@ -4,18 +4,18 @@ decoder_config=test/example/cdec.ini # config for cdec
# weights for these features will be printed on each iteration
print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
tmp=/tmp
-stop_after=100 # stop epoch after 100 inputs
+stop_after=20 # stop epoch after 20 inputs
# interesting stuff
epochs=3 # run over input 3 times
k=100 # use 100best lists
N=4 # optimize (approx) BLEU4
-scorer=approx_bleu # use 'stupid' BLEU+1
+scorer=stupid_bleu # use 'stupid' BLEU+1
learning_rate=0.0001 # learning rate
gamma=0 # use SVM reg
sample_from=kbest # use kbest lists (as opposed to forest)
filter=uniq # only unique entries in kbest (surface form)
pair_sampling=XYX
-hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10
+hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
pair_threshold=0 # minimum distance in BLEU (this will still only use pairs with diff > 0)
select_weights=VOID # don't output weights
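
For reference, a rough sketch of the sentence-level 'stupid' BLEU+1 scorer that this example config now selects instead of approx. BLEU: each n-gram precision is add-one smoothed, so a single missing 4-gram does not zero out the score. This is an illustrative Python sketch only, not dtrain's C++ implementation, and the exact smoothing details in dtrain may differ.

from collections import Counter
import math

def ngrams(tokens, n):
    # multiset of n-grams in a token list
    return Counter(tuple(tokens[i:i+n]) for i in range(len(tokens) - n + 1))

def stupid_bleu(hyp, ref, max_n=4):
    # 'stupid' BLEU+1 sketch: clip n-gram matches against the reference,
    # add-one smooth each precision, take the geometric mean over n=1..4,
    # and apply the standard brevity penalty.
    hyp, ref = hyp.split(), ref.split()
    log_prec = 0.0
    for n in range(1, max_n + 1):
        h, r = ngrams(hyp, n), ngrams(ref, n)
        match = sum(min(c, r[g]) for g, c in h.items())
        total = max(len(hyp) - n + 1, 0)
        log_prec += math.log((match + 1.0) / (total + 1.0))
    bp = 1.0 if len(hyp) >= len(ref) else math.exp(1.0 - len(ref) / max(len(hyp), 1))
    return bp * math.exp(log_prec / max_n)

print(stupid_bleu("the cat sat on the mat", "the cat is on the mat"))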