| author | Paul Baltescu <pauldb89@gmail.com> | 2013-11-23 17:33:47 +0000 |
|---|---|---|
| committer | Paul Baltescu <pauldb89@gmail.com> | 2013-11-23 17:33:47 +0000 |
| commit | 072c4bb1edde483b87b93bc6f4eec36fc8a21008 (patch) | |
| tree | 6ceaa6ae1e08df9e523282740b14f4857236297c /training/dtrain/examples/standard/dtrain.ini | |
| parent | 7e90b8ea10904f9b83f4e77e14c7396a3e6f7d5d (diff) | |
| parent | 9e80389b9763aa4f7f626ec71b561ccf6948d3ad (diff) | |
Merge branch 'master' of https://github.com/redpony/cdec
Diffstat (limited to 'training/dtrain/examples/standard/dtrain.ini')
-rw-r--r-- | training/dtrain/examples/standard/dtrain.ini | 11 |
1 file changed, 7 insertions, 4 deletions
```diff
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 23e94285..fc83f08e 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,5 +1,6 @@
-input=./nc-wmt11.de.gz
-refs=./nc-wmt11.en.gz
+#input=./nc-wmt11.de.gz
+#refs=./nc-wmt11.en.gz
+bitext=./nc-wmt11.gz
 output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
 select_weights=VOID # output average (over epochs) weight vector
 decoder_config=./cdec.ini # config for cdec
@@ -10,11 +11,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
 stop_after=10 # stop epoch after 10 inputs
 
 # interesting stuff
-epochs=2 # run over input 2 times
+epochs=3 # run over input 3 times
 k=100 # use 100best lists
 N=4 # optimize (approx) BLEU4
 scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=1.0 # learning rate, don't care if gamma=0 (perceptron)
+learning_rate=0.1 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
 gamma=0 # use SVM reg
 sample_from=kbest # use kbest lists (as opposed to forest)
 filter=uniq # only unique entries in kbest (surface form)
@@ -22,3 +23,5 @@ pair_sampling=XYX #
 hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
 pair_threshold=0 # minimum distance in BLEU (here: > 0)
 loss_margin=0 # update if correctly ranked, but within this margin
+repeat=1 # repeat training on a kbest list 1 times
+#batch=true # batch tuning, update after accumulating over all sentences and all kbest lists
```
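The lowered `learning_rate` only matters once `loss_margin` or `gamma` is nonzero, as the updated comment notes: with a plain perceptron (`gamma=0`, `loss_margin=0`), scaling each update just rescales the whole weight vector and leaves the induced ranking of hypotheses unchanged. The sketch below is a minimal Python illustration of a pairwise margin-perceptron update over one k-best list, not dtrain's actual C++ implementation; the feature dictionaries and BLEU scores are made up, and the exhaustive pair loop stands in for the `XYX`/`hi_lo` pair sampling configured above.

```python
# Illustrative sketch only (assumptions: sparse dict features, exhaustive pair
# enumeration instead of dtrain's configured pair sampling).

def margin_perceptron_update(weights, kbest, learning_rate=0.1, loss_margin=0.0):
    """kbest: list of (features: dict, bleu: float) for one source sentence.
    For every pair where the higher-BLEU hypothesis does not outscore the
    lower-BLEU one by more than loss_margin, move the weights toward it."""
    for feat_hi, bleu_hi in kbest:
        for feat_lo, bleu_lo in kbest:
            if bleu_hi <= bleu_lo:
                continue  # only pairs ordered by the gold metric
            score_hi = sum(weights.get(f, 0.0) * v for f, v in feat_hi.items())
            score_lo = sum(weights.get(f, 0.0) * v for f, v in feat_lo.items())
            if score_hi - score_lo > loss_margin:
                continue  # correctly ranked by more than the margin: no update
            for f in set(feat_hi) | set(feat_lo):
                delta = feat_hi.get(f, 0.0) - feat_lo.get(f, 0.0)
                weights[f] = weights.get(f, 0.0) + learning_rate * delta
    return weights


if __name__ == "__main__":
    # hypothetical 3-best list: (sparse feature vector, BLEU score)
    kbest = [
        ({"LanguageModel": 1.2, "WordPenalty": -3.0}, 0.30),
        ({"LanguageModel": 0.8, "WordPenalty": -2.0}, 0.45),
        ({"LanguageModel": 0.5, "WordPenalty": -4.0}, 0.10),
    ]
    print(margin_perceptron_update({}, kbest, learning_rate=0.1, loss_margin=0.0))
```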