author | Patrick Simianer <p@simianer.de> | 2013-11-12 20:07:47 +0100 |
---|---|---|
committer | Patrick Simianer <p@simianer.de> | 2013-11-12 20:07:47 +0100 |
commit | 29473017d0f0cdd6f383d253235e2f3388533d13 (patch) |
tree | 85dc0afdabcbe13659c5f7bf1935132be61d907c /training/dtrain/examples/standard | |
parent | a6d8ae2bd3cc2294e17588656e6aa20a96f6fcbc (diff) |
impl repeat param
Diffstat (limited to 'training/dtrain/examples/standard')
-rw-r--r-- | training/dtrain/examples/standard/dtrain.ini | 6 |
1 file changed, 4 insertions, 2 deletions
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 4d096dfb..ef022469 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -11,11 +11,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
 stop_after=10 # stop epoch after 10 inputs

 # interesting stuff
-epochs=100 # run over input 3 times
+epochs=3 # run over input 3 times
 k=100 # use 100best lists
 N=4 # optimize (approx) BLEU4
 scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=0.0001 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
+learning_rate=0.0001 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
 gamma=0 # use SVM reg
 sample_from=kbest # use kbest lists (as opposed to forest)
 filter=uniq # only unique entries in kbest (surface form)
@@ -23,3 +23,5 @@ pair_sampling=XYX #
 hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
 pair_threshold=0 # minimum distance in BLEU (here: > 0)
 loss_margin=0 # update if correctly ranked, but within this margin
+repeat=1 # repeat training on a kbest list 1 times
+#batch=true # batch tuning, update after accumulating over all sentences and all kbest lists
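For context, a minimal sketch of how the new options slot into a dtrain.ini once this patch is applied. The option names and their semantics (repeat re-runs the updates on the same k-best list, batch accumulates updates over all sentences and all k-best lists before applying them) come from the inline comments in the diff above; the concrete values repeat=2 and an uncommented batch=true are illustrative only, since the shipped example keeps repeat=1 and leaves batch commented out.

```ini
# settings carried over from the shipped example (values as in the diff above)
epochs=3              # run over input 3 times
k=100                 # use 100best lists
learning_rate=0.0001  # learning rate
loss_margin=0         # update if correctly ranked, but within this margin

# options introduced by this commit, shown with illustrative non-default values
repeat=2              # repeat training on each k-best list 2 times
batch=true            # batch tuning: update after accumulating over all sentences and all k-best lists
```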