Diffstat (limited to 'training/dtrain/examples/standard/dtrain.ini')
-rw-r--r-- | training/dtrain/examples/standard/dtrain.ini | 29
1 file changed, 6 insertions, 23 deletions
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index a515db02..f2698007 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,27 +1,10 @@
-#input=./nc-wmt11.de.gz
-#refs=./nc-wmt11.en.gz
-bitext=./nc-wmt11.gz
+bitext=./nc-wmt11.gz # input bitext
 output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
-select_weights=avg # output average (over epochs) weight vector
 decoder_config=./cdec.ini # config for cdec
-# weights for these features will be printed on each iteration
+iterations=3 # run over input 3 times
+k=100 # use 100best lists
+N=4 # optimize (approx.) BLEU4
+learning_rate=0.1 # learning rate
+error_margin=1.0 # margin for margin perceptron
 print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-# newer version of the grammar extractor use different feature names:
-#print_weights= EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV
-stop_after=10 # stop epoch after 10 inputs
-# interesting stuff
-epochs=3 # run over input 3 times
-k=100 # use 100best lists
-N=4 # optimize (approx) BLEU4
-scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=0.1 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
-gamma=0 # use SVM reg
-sample_from=kbest # use kbest lists (as opposed to forest)
-filter=uniq # only unique entries in kbest (surface form)
-pair_sampling=XYX #
-hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
-pair_threshold=0 # minimum distance in BLEU (here: > 0)
-loss_margin=0 # update if correctly ranked, but within this margin
-repeat=1 # repeat training on a kbest list 1 times
-#batch=true # batch tuning, update after accumulating over all sentences and all kbest lists
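The simplified dtrain.ini is a flat list of key=value pairs with inline '#' comments. For illustration of the format only, here is a minimal Python sketch of a reader; dtrain's actual parser is C++ and may follow different rules, so the parsing conventions below (comment stripping, whitespace handling) are assumptions inferred from the example above.

    # Minimal sketch of reading a dtrain-style config: key=value pairs,
    # where '#' starts an inline comment. Parsing rules are assumptions
    # inferred from the file above, not dtrain's actual implementation.

    def read_config(path):
        config = {}
        with open(path) as f:
            for line in f:
                line = line.split('#', 1)[0].strip()  # drop inline comment
                if not line:
                    continue                          # skip blank/comment-only lines
                key, _, value = line.partition('=')
                config[key.strip()] = value.strip()
        return config

    cfg = read_config('dtrain.ini')
    print(cfg['iterations'], cfg['k'], cfg['learning_rate'])  # '3' '100' '0.1'

Note that all values come back as strings, so numeric settings such as k, N, and learning_rate would need explicit conversion, and output='-' keeps its literal meaning of STDOUT.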