Diffstat (limited to 'training/dtrain/examples/standard/dtrain.ini')
-rw-r--r--  training/dtrain/examples/standard/dtrain.ini | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 23e94285..fc83f08e 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,5 +1,6 @@
-input=./nc-wmt11.de.gz
-refs=./nc-wmt11.en.gz
+#input=./nc-wmt11.de.gz
+#refs=./nc-wmt11.en.gz
+bitext=./nc-wmt11.gz
 output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
 select_weights=VOID # output average (over epochs) weight vector
 decoder_config=./cdec.ini # config for cdec
@@ -10,11 +11,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
 stop_after=10 # stop epoch after 10 inputs
 
 # interesting stuff
-epochs=2 # run over input 2 times
+epochs=3 # run over input 3 times
 k=100 # use 100best lists
 N=4 # optimize (approx) BLEU4
 scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=1.0 # learning rate, don't care if gamma=0 (perceptron)
+learning_rate=0.1 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
 gamma=0 # use SVM reg
 sample_from=kbest # use kbest lists (as opposed to forest)
 filter=uniq # only unique entries in kbest (surface form)
@@ -22,3 +23,5 @@ pair_sampling=XYX #
 hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
 pair_threshold=0 # minimum distance in BLEU (here: > 0)
 loss_margin=0 # update if correctly ranked, but within this margin
+repeat=1 # repeat training on a kbest list 1 time
+#batch=true # batch tuning, update after accumulating over all sentences and all kbest lists
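
Note on the bitext= change above: the two separate input/refs files are replaced by a single gzipped bitext file. A minimal sketch for rebuilding it from the old pair, assuming dtrain expects one "source ||| target" pair per line (an assumption based on common cdec conventions, not confirmed by this diff):

import gzip

# ASSUMPTION: one gzipped "source ||| target" pair per line, as cdec tools
# conventionally format parallel data; adjust if dtrain expects otherwise.
with gzip.open("nc-wmt11.de.gz", "rt", encoding="utf-8") as src, \
     gzip.open("nc-wmt11.en.gz", "rt", encoding="utf-8") as ref, \
     gzip.open("nc-wmt11.gz", "wt", encoding="utf-8") as out:
    for s, r in zip(src, ref):
        out.write(s.rstrip("\n") + " ||| " + r.rstrip("\n") + "\n")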
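On the learning_rate comment above: with gamma=0 and loss_margin=0 the update is a plain pairwise ranking perceptron, where the update condition depends only on the sign of the score difference, so the learning rate merely rescales the weight vector without changing any ranking decisions; with a positive margin, updates also fire when the better hypothesis wins by less than loss_margin. A hedged sketch of that pairwise update, using dict-based feature vectors and hypothetical names (an illustration of the rule, not dtrain's actual code):

def perceptron_update(w, pairs, learning_rate=0.1, loss_margin=0.0):
    """w: feature->weight dict; pairs: (f_hi, f_lo) feature dicts
    sampled from a kbest list with BLEU(hi) > BLEU(lo)."""
    def score(f):
        return sum(w.get(k, 0.0) * v for k, v in f.items())
    for f_hi, f_lo in pairs:
        # update when hi is not ranked above lo by more than the margin
        # (loss_margin=0 reduces this to the plain ranking perceptron)
        if score(f_hi) - score(f_lo) <= loss_margin:
            for k in set(f_hi) | set(f_lo):
                w[k] = w.get(k, 0.0) + learning_rate * (
                    f_hi.get(k, 0.0) - f_lo.get(k, 0.0))
    return w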