#input=./nc-wmt11.de.gz
#refs=./nc-wmt11.en.gz
bitext=./nc-wmt11.gz
output=-                  # weights output: a file (add .gz for gzip compression) or '-' for STDOUT
select_weights=VOID       # which weight vector to output (set to 'avg' for the average over epochs)
decoder_config=./cdec.ini # config for cdec
# weights for these features will be printed on each iteration
print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
# newer versions of the grammar extractor use different feature names:
#print_weights= EgivenFCoherent SampleCountF CountEF MaxLexFgivenE MaxLexEgivenF IsSingletonF IsSingletonFE Glue WordPenalty PassThrough LanguageModel LanguageModel_OOV
stop_after=10 # stop each epoch after 10 inputs

# interesting stuff
epochs=3                 # run over input 3 times
k=100                    # use 100-best lists
N=4                      # optimize (approx) BLEU4
scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
learning_rate=0.1        # learning rate; its value doesn't matter if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
gamma=0                  # SVM regularization strength (0 = no SVM regularization)
sample_from=kbest        # use kbest lists (as opposed to forest)
filter=uniq              # only unique entries in kbest (surface form)
pair_sampling=XYX        # how to sample training pairs from the k-best list
hi_lo=0.1                # for XYX sampling: top 10% vs. middle 80%, and middle 80% vs. bottom 10% (with k=100: a 10/80/10 split)
pair_threshold=0         # minimum BLEU difference within a pair (here: any difference > 0)
loss_margin=0            # also update pairs that are ranked correctly but lie within this margin
repeat=1                 # repeat training on each k-best list this many times
#batch=true              # batch tuning, update after accumulating over all sentences and all kbest lists
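
# Example invocation (a sketch, not part of the original file: the -c/--config
# flag and the working directory are assumptions; check the options of the
# dtrain binary in your cdec build before running):
#   dtrain -c dtrain.ini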