Diffstat (limited to 'training/dtrain/examples')
-rw-r--r--  training/dtrain/examples/standard/dtrain.ini  6
1 file changed, 4 insertions, 2 deletions
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 4d096dfb..ef022469 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -11,11 +11,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
 stop_after=10 # stop epoch after 10 inputs
 # interesting stuff
-epochs=100                 # run over input 3 times
+epochs=3                 # run over input 3 times
 k=100                    # use 100best lists
 N=4                      # optimize (approx) BLEU4
 scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=0.0001       # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
+learning_rate=0.0001     # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
 gamma=0                  # use SVM reg
 sample_from=kbest        # use kbest lists (as opposed to forest)
 filter=uniq              # only unique entries in kbest (surface form)
@@ -23,3 +23,5 @@ pair_sampling=XYX        #
 hi_lo=0.1                # 10 vs 80 vs 10 and 80 vs 10 here
 pair_threshold=0         # minimum distance in BLEU (here: > 0)
 loss_margin=0            # update if correctly ranked, but within this margin
+repeat=1                 # repeat training on a kbest list 1 times
+#batch=true              # batch tuning, update after accumulating over all sentences and all kbest lists
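For context, the commented settings in this example config describe a pairwise-ranking update over k-best lists. The Python sketch below only illustrates how `learning_rate`, `gamma`, `loss_margin`, `pair_threshold`, and `hi_lo` might interact in such an update; it is not dtrain's actual C++ implementation, and `sample_pairs`, the feature vectors, and the per-hypothesis BLEU scores are hypothetical placeholders.

```python
import numpy as np

def sample_pairs(kbest, hi_lo=0.1):
    """Hypothetical XYX-style sampler: pair the top hi_lo fraction of a
    BLEU-ranked k-best list against the bottom hi_lo fraction."""
    ranked = sorted(kbest, key=lambda h: h[1], reverse=True)
    cut = max(1, int(len(ranked) * hi_lo))
    hi, lo = ranked[:cut], ranked[-cut:]
    return [(h, l) for h in hi for l in lo]

def pairwise_update(w, kbest, learning_rate=0.0001, gamma=0.0, loss_margin=0.0):
    """One illustrative pass over a single k-best list.
    kbest: list of (feature_vector: np.ndarray, bleu: float) tuples."""
    for (f_hi, b_hi), (f_lo, b_lo) in sample_pairs(kbest):
        if b_hi - b_lo <= 0.0:            # pair_threshold=0: require a BLEU gap > 0
            continue
        diff = f_hi - f_lo
        if w.dot(diff) <= loss_margin:    # loss_margin=0: plain perceptron update
            w += learning_rate * diff
        if gamma > 0.0:                   # gamma=0: no SVM-style regularization
            w -= learning_rate * gamma * w
    return w
```

Under this reading, the change from `epochs=100` to `epochs=3` simply limits how many such passes are made over the input, and the commented-out `batch=true` line would defer the weight update until all sentences and k-best lists have been accumulated.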
