From d19af7519f57589373569cc592ae49b7cba55e15 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Thu, 26 Apr 2012 09:31:34 +0200
Subject: merge older changes, more polishing

---
 dtrain/test/example/dtrain.ini | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/dtrain/test/example/dtrain.ini b/dtrain/test/example/dtrain.ini
index 68173e11..66be6bf2 100644
--- a/dtrain/test/example/dtrain.ini
+++ b/dtrain/test/example/dtrain.ini
@@ -1,20 +1,20 @@
 input=test/example/nc-wmt11.1k.gz # use '-' for stdin
-output=- # a weights file or stdout
-decoder_config=test/example/cdec.ini # ini for cdec
-# these will be printed on each iteration
+output=weights.gz # a weights file (add .gz for gzip compression) or STDOUT '-'
+decoder_config=test/example/cdec.ini # config for cdec
+# weights for these features will be printed on each iteration
 print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
 tmp=/tmp
-stop_after=10 # stop iteration after 10 inputs
+stop_after=100 # stop epoch after 10 inputs
 # interesting stuff
-epochs=3 # run over input 3 times
-k=200 # use 100best lists
-N=4 # optimize (approx) BLEU4
+epochs=100 # run over input 3 times
+k=100 # use 100best lists
+N=4 # optimize (approx) BLEU4
 learning_rate=0.0001 # learning rate
-gamma=0.00001 # use SVM reg
-scorer=stupid_bleu # use stupid BLEU+1 approx.
+gamma=0 # use SVM reg
+scorer=smooth_bleu # use smooth BLEU of (Liang et al. '06)
 sample_from=kbest # use kbest lists (as opposed to forest)
-filter=uniq # only uniq entries in kbest
+filter=uniq # only unique entries in kbest (surface form)
 pair_sampling=108010 # 10 vs 80 vs 10 and 80 vs 10
-pair_threshold=0 # minimum distance in BLEU
+pair_threshold=0 # minimum distance in BLEU (this will still only use pairs with diff > 0)
 select_weights=last # just output last weights
--
cgit v1.2.3
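
For reference, the configuration below is what dtrain/test/example/dtrain.ini looks like after this patch, reconstructed from the context and '+' lines of the hunk above; comment alignment and any lines of the file outside this hunk are not shown:

    input=test/example/nc-wmt11.1k.gz # use '-' for stdin
    output=weights.gz # a weights file (add .gz for gzip compression) or STDOUT '-'
    decoder_config=test/example/cdec.ini # config for cdec
    # weights for these features will be printed on each iteration
    print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
    tmp=/tmp
    stop_after=100 # stop epoch after 10 inputs
    # interesting stuff
    epochs=100 # run over input 3 times
    k=100 # use 100best lists
    N=4 # optimize (approx) BLEU4
    learning_rate=0.0001 # learning rate
    gamma=0 # use SVM reg
    scorer=smooth_bleu # use smooth BLEU of (Liang et al. '06)
    sample_from=kbest # use kbest lists (as opposed to forest)
    filter=uniq # only unique entries in kbest (surface form)
    pair_sampling=108010 # 10 vs 80 vs 10 and 80 vs 10
    pair_threshold=0 # minimum distance in BLEU (this will still only use pairs with diff > 0)
    select_weights=last # just output last weights

A run against this file would presumably look something like ./dtrain -c test/example/dtrain.ini from the dtrain/ directory; the exact binary location and config flag are not shown in this patch, so treat that invocation as an assumption and check the dtrain usage/help output.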