Diffstat (limited to 'training/dtrain/examples/standard/dtrain.ini')
-rw-r--r--  training/dtrain/examples/standard/dtrain.ini  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index a05e9c29..e1072d30 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,12 +1,12 @@
input=./nc-wmt11.de.gz
refs=./nc-wmt11.en.gz
output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
-select_weights=avg # output average (over epochs) weight vector
+select_weights=VOID # output average (over epochs) weight vector
decoder_config=./cdec.ini # config for cdec
# weights for these features will be printed on each iteration
-print_weights= EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV
+print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
# newer version of the grammar extractor use different feature names:
-#print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
+#print_weights= EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV
stop_after=10 # stop epoch after 10 inputs
# interesting stuff
@@ -21,4 +21,4 @@ filter=uniq # only unique entries in kbest (surface form)
pair_sampling=XYX #
hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
pair_threshold=0 # minimum distance in BLEU (here: > 0)
-loss_margin=0
+loss_margin=0 # update if correctly ranked, but within this margin
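
The new comment on loss_margin describes a margin-based pairwise update: a k-best pair can trigger an update even when it is already correctly ranked, as long as its score gap is smaller than the margin. The following is a minimal sketch of that idea, assuming a simple linear model and a hypothetical maybe_update helper; it is an illustration of the setting, not dtrain's actual implementation.

# Hypothetical sketch, not dtrain code: pairwise margin update.
# feats_plus/feats_minus are feature maps of a k-best pair where
# feats_plus belongs to the hypothesis with the higher BLEU.
def maybe_update(weights, feats_plus, feats_minus, loss_margin=0.0, eta=0.0001):
    score_plus = sum(weights.get(f, 0.0) * v for f, v in feats_plus.items())
    score_minus = sum(weights.get(f, 0.0) * v for f, v in feats_minus.items())
    # Update on ranking errors, and also when the pair is correctly
    # ranked but separated by less than loss_margin (with margin 0,
    # only misranked or tied pairs trigger an update).
    if score_plus - score_minus < loss_margin:
        for f, v in feats_plus.items():
            weights[f] = weights.get(f, 0.0) + eta * v
        for f, v in feats_minus.items():
            weights[f] = weights.get(f, 0.0) - eta * v
    return weights

# Example call with made-up feature values:
# w = maybe_update({"Glue": 0.5}, {"Glue": 1.0, "WordPenalty": -2.0},
#                  {"Glue": 2.0, "WordPenalty": -1.0}, loss_margin=0.0)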