# training/dtrain/test/example/dtrain.ini

input=test/example/nc-wmt11.1k.gz    # use '-' for STDIN
output=-                             # a weights file (add .gz for gzip compression) or '-' for STDOUT
select_weights=VOID                  # don't output weights
decoder_config=test/example/cdec.ini # config for cdec
# weights for these features will be printed on each iteration
print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
tmp=/tmp                             # directory for temporary files
stop_after=10 # stop each epoch after 10 inputs

# interesting stuff
epochs=2                # run over input 2 times
k=100                   # use 100-best lists
N=4                     # optimize (approx) BLEU4
scorer=stupid_bleu      # use 'stupid' BLEU+1
learning_rate=1.0       # learning rate; its value doesn't matter when gamma=0 (perceptron)
gamma=0                 # SVM regularization strength; 0 disables it (perceptron update)
sample_from=kbest       # use k-best lists (as opposed to sampling from the forest)
filter=uniq             # keep only unique entries (by surface form) in the k-best list
pair_sampling=XYX       # pair sampling strategy, controlled by hi_lo below
hi_lo=0.1               # split the list 10/80/10 and build pairs top vs. middle and middle vs. bottom
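# Worked example (an illustration, not part of the original config): with k=100
# and hi_lo=0.1, pairs are drawn from entries 1-10 vs. 11-90 and from entries
# 11-90 vs. 91-100, assuming the k-best entries are ranked by score.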
pair_threshold=0        # minimum BLEU difference within a pair (even at 0, only pairs with diff > 0 are used)
loss_margin=0           # margin for the hinge loss; with 0, update only on actually misranked pairs
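
# Example invocation (a sketch, not part of the original config; assumes dtrain
# has been built in training/dtrain and is run from that directory, and that -c
# is the flag naming the config file):
#   ./dtrain -c test/example/dtrain.ini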