From fde1df3ee578564f78d7d0eac453dcb3f1740e05 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Thu, 26 Apr 2012 11:36:49 +0200
Subject: remove obsolete stuff

---
 dtrain/test/example/dtrain.ini | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'dtrain/test/example/dtrain.ini')

diff --git a/dtrain/test/example/dtrain.ini b/dtrain/test/example/dtrain.ini
index 66be6bf2..b59250f3 100644
--- a/dtrain/test/example/dtrain.ini
+++ b/dtrain/test/example/dtrain.ini
@@ -1,18 +1,18 @@
-input=test/example/nc-wmt11.1k.gz # use '-' for stdin
+input=test/example/nc-wmt11.1k.gz # use '-' for STDIN
 output=weights.gz # a weights file (add .gz for gzip compression) or STDOUT '-'
 decoder_config=test/example/cdec.ini # config for cdec
 # weights for these features will be printed on each iteration
 print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
 tmp=/tmp
-stop_after=100 # stop epoch after 10 inputs
+stop_after=100 # stop epoch after 100 inputs
 
 # interesting stuff
-epochs=100 # run over input 3 times
+epochs=3 # run over input 3 times
 k=100 # use 100best lists
-N=4 # optimize (approx) BLEU4
+N=4 # optimize (approx) BLEU4
+scorer=stupid_bleu # use 'stupid' BLEU+1
 learning_rate=0.0001 # learning rate
-gamma=0 # use SVM reg
-scorer=smooth_bleu # use smooth BLEU of (Liang et al. '06)
+gamma=0 # use SVM reg
 sample_from=kbest # use kbest lists (as opposed to forest)
 filter=uniq # only unique entries in kbest (surface form)
 pair_sampling=108010 # 10 vs 80 vs 10 and 80 vs 10
--
cgit v1.2.3
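
For reference, this is how test/example/dtrain.ini reads once the patch above is applied. It is reconstructed directly from the hunk; the exact whitespace alignment of the comments in the committed file may differ.

input=test/example/nc-wmt11.1k.gz # use '-' for STDIN
output=weights.gz # a weights file (add .gz for gzip compression) or STDOUT '-'
decoder_config=test/example/cdec.ini # config for cdec
# weights for these features will be printed on each iteration
print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
tmp=/tmp
stop_after=100 # stop epoch after 100 inputs

# interesting stuff
epochs=3 # run over input 3 times
k=100 # use 100best lists
N=4 # optimize (approx) BLEU4
scorer=stupid_bleu # use 'stupid' BLEU+1
learning_rate=0.0001 # learning rate
gamma=0 # use SVM reg
sample_from=kbest # use kbest lists (as opposed to forest)
filter=uniq # only unique entries in kbest (surface form)
pair_sampling=108010 # 10 vs 80 vs 10 and 80 vs 10

Assuming the dtrain binary has been built in the dtrain/ directory and accepts its config file via a -c switch (an assumption; check ./dtrain --help), a run with this example config would look like:

cd dtrain && ./dtrain -c test/example/dtrain.ini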