From a8ea0a66b798326061bc9f0da153b96b730130f1 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Tue, 12 Nov 2013 18:36:03 +0100
Subject: implemented batch tuning

---
 training/dtrain/examples/standard/dtrain.ini | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 7dbb4ff0..4d096dfb 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -11,11 +11,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
 stop_after=10 # stop epoch after 10 inputs
 
 # interesting stuff
-epochs=3 # run over input 3 times
+epochs=100 # run over input 100 times
 k=100 # use 100best lists
 N=4 # optimize (approx) BLEU4
 scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=1.0 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
+learning_rate=0.0001 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
 gamma=0 # use SVM reg
 sample_from=kbest # use kbest lists (as opposed to forest)
 filter=uniq # only unique entries in kbest (surface form)
-- 
cgit v1.2.3
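
Read together, the two parameter changes fit the move to batch tuning: when per-example updates are summed into a single step per pass over the data, each step is much larger in magnitude, so the learning rate drops from 1.0 to 0.0001 while the number of epochs grows from 3 to 100. Below is a minimal Python sketch of that contrast, assuming a simplified perceptron-style hope/fear update; the function names, the (feats_hope, feats_fear) feature-vector abstraction, and the loop structure are illustrative assumptions, not dtrain's actual internals.

```python
# Hypothetical sketch contrasting online and batch perceptron-style
# tuning; names and data layout are illustrative, not dtrain's code.
# Each example is a pair of sparse feature dicts (hope, fear).

def online_tune(examples, epochs=3, learning_rate=1.0):
    """Update the weights after every single example (old config)."""
    w = {}
    for _ in range(epochs):
        for feats_hope, feats_fear in examples:
            # Perceptron step: move toward the hope derivation,
            # away from the fear derivation, one example at a time.
            for f, v in feats_hope.items():
                w[f] = w.get(f, 0.0) + learning_rate * v
            for f, v in feats_fear.items():
                w[f] = w.get(f, 0.0) - learning_rate * v
    return w

def batch_tune(examples, epochs=100, learning_rate=0.0001):
    """Apply one aggregate update per pass over the data (new config)."""
    w = {}
    for _ in range(epochs):
        grad = {}
        for feats_hope, feats_fear in examples:
            for f, v in feats_hope.items():
                grad[f] = grad.get(f, 0.0) + v
            for f, v in feats_fear.items():
                grad[f] = grad.get(f, 0.0) - v
        # The summed update scales with the number of inputs, hence
        # the much smaller learning rate and many more epochs.
        for f, g in grad.items():
            w[f] = w.get(f, 0.0) + learning_rate * g
    return w
```

Because the batch step grows with the size of the tuning set, a batch setting typically pairs a much smaller learning rate with many more passes, which is exactly the trade the example config makes here.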