summaryrefslogtreecommitdiff
path: root/training/dtrain/examples/standard/dtrain.ini
diff options
context:
space:
mode:
authorPatrick Simianer <p@simianer.de>2013-11-12 18:36:03 +0100
committerPatrick Simianer <p@simianer.de>2013-11-12 18:36:03 +0100
commita6d8ae2bd3cc2294e17588656e6aa20a96f6fcbc (patch)
tree26b83bb99ef562237b8d187d6782781a3f7e3316 /training/dtrain/examples/standard/dtrain.ini
parenta7a0773b0e299f71409cf4a13d18ea7db5ab3fc1 (diff)
implemented batch tuning
Diffstat (limited to 'training/dtrain/examples/standard/dtrain.ini')
-rw-r--r--training/dtrain/examples/standard/dtrain.ini4
1 file changed, 2 insertions, 2 deletions
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 7dbb4ff0..4d096dfb 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -11,11 +11,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
stop_after=10 # stop epoch after 10 inputs
# interesting stuff
-epochs=3 # run over input 3 times
+epochs=100 # run over input 100 times
k=100 # use 100best lists
N=4 # optimize (approx) BLEU4
scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=1.0 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
+learning_rate=0.0001 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
gamma=0 # use SVM reg
sample_from=kbest # use kbest lists (as opposed to forest)
filter=uniq # only unique entries in kbest (surface form)