From 864a25ebf0c6b9ff0e127f310930834326afbfa0 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Tue, 12 Nov 2013 20:39:59 +0100
Subject: fix k-best loss computation; update standard example
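
Compute the first-pass k-best loss (kbest_loss_first) once, directly from
the sampled pairs, instead of picking it up inside the repeat loop, and
initialize it to 0.0 before accumulating. Recompute the best hypothesis
(and update score_sum/model_sum) only when repeat > 1. Always print the
k-best loss improvement, and print "avg # rank err" before "avg # margin
viol". The standard example's learning_rate is bumped from 0.0001 to 0.1
and its expected-output is refreshed.

Each pair contributes a hinge-style term max(0, -(model_first -
model_second)) to the loss. For illustration only, here is a minimal
self-contained sketch of that accumulation; the Hyp struct is a
hypothetical stand-in for dtrain's ScoredHyp, keeping just the model
score:

    #include <algorithm>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for dtrain's ScoredHyp; only the model
    // score matters for this loss.
    struct Hyp { double model; };

    // Pairwise hinge loss over preference pairs: the first hypothesis
    // of each pair should outscore the second, so a pair contributes
    // max(0, -(model_first - model_second)).
    double pairwise_hinge_loss(const std::vector<std::pair<Hyp,Hyp> >& pairs)
    {
      double loss = 0.0;
      for (std::vector<std::pair<Hyp,Hyp> >::const_iterator it = pairs.begin();
           it != pairs.end(); ++it) {
        double model_diff = it->first.model - it->second.model;
        loss += std::max(0.0, -1.0 * model_diff);
      }
      return loss;
    }

    int main()
    {
      std::vector<std::pair<Hyp,Hyp> > pairs;
      Hyp a = { 1.0 }, b = { 2.5 }, c = { 3.0 }, d = { 0.5 };
      pairs.push_back(std::make_pair(a, b)); // mis-ranked: adds 1.5
      pairs.push_back(std::make_pair(c, d)); // correctly ranked: adds 0
      std::cout << pairwise_hinge_loss(pairs) << std::endl; // prints 1.5
      return 0;
    }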
---
training/dtrain/dtrain.cc | 36 ++++---
training/dtrain/examples/standard/dtrain.ini | 2 +-
training/dtrain/examples/standard/expected-output | 112 +++++++++++-----------
3 files changed, 80 insertions(+), 70 deletions(-)
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 441e2cd7..0a27a068 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -414,6 +414,12 @@ main(int argc, char** argv)
- score_t kbest_loss_first, kbest_loss_last = 0.0;
+ score_t kbest_loss_first = 0.0, kbest_loss_last = 0.0;
+ for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
+ it != pairs.end(); it++) {
+ score_t model_diff = it->first.model - it->second.model;
+ kbest_loss_first += max(0.0, -1.0 * model_diff);
+ }
+
for (int ki=0; ki < repeat; ki++) {
score_t kbest_loss = 0.0; // test-k-best
@@ -520,21 +526,22 @@ main(int argc, char** argv)
}
}
- if (ki==0) kbest_loss_first = kbest_loss;
if (ki==repeat-1) { // done
kbest_loss_last = kbest_loss;
- score_t best_score = -1.;
- score_t best_model = -std::numeric_limits<score_t>::max();
- unsigned best_idx;
- for (unsigned i=0; i < samples->size(); i++) {
- score_t s = lambdas.dot((*samples)[i].f);
- if (s > best_model) {
- best_idx = i;
- best_model = s;
+ if (repeat > 1) {
+ score_t best_score = -1.;
+ score_t best_model = -std::numeric_limits<score_t>::max();
+ unsigned best_idx;
+ for (unsigned i=0; i < samples->size(); i++) {
+ score_t s = lambdas.dot((*samples)[i].f);
+ if (s > best_model) {
+ best_idx = i;
+ best_model = s;
+ }
}
+ score_sum += (*samples)[best_idx].score;
+ model_sum += best_model;
}
- score_sum += (*samples)[best_idx].score;
- model_sum += best_model;
}
} // repeat
@@ -588,15 +595,14 @@ main(int argc, char** argv)
cerr << _p << " (" << model_diff << ")" << endl;
cerr << " avg # pairs: ";
cerr << _np << npairs/(float)in_sz << endl;
- cerr << " avg # margin viol: ";
- cerr << margin_violations/(float)in_sz << endl;
cerr << " avg # rank err: ";
cerr << rank_errors/(float)in_sz;
if (faster_perceptron) cerr << " (meaningless)";
cerr << endl;
+ cerr << " avg # margin viol: ";
+ cerr << margin_violations/(float)in_sz << endl;
if (batch) cerr << " batch loss: " << batch_loss << endl;
- if (repeat > 1) cerr << " k-best loss imp: " << ((float)kbest_loss_improve/in_sz)*100 << "%" << endl;
-
+ cerr << " k-best loss imp: " << ((float)kbest_loss_improve/in_sz)*100 << "%" << endl;
cerr << " non0 feature count: " << nonz << endl;
cerr << " avg list sz: " << list_sz/(float)in_sz << endl;
cerr << " avg f count: " << f_count/(float)list_sz << endl;
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index ef022469..fc83f08e 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -15,7 +15,7 @@ epochs=3 # run over input 3 times
k=100 # use 100best lists
N=4 # optimize (approx) BLEU4
scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=0.0001 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
+learning_rate=0.1 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
gamma=0 # use SVM reg
sample_from=kbest # use kbest lists (as opposed to forest)
filter=uniq # only unique entries in kbest (surface form)
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index a35bbe6f..75f47337 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,17 +4,18 @@ Reading ./nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
Example feature: Shape_S00000_T00000
-Seeding random number sequence to 4049211323
+Seeding random number sequence to 3751911392
dtrain
Parameters:
k 100
N 4
T 3
+ batch 0
scorer 'fixed_stupid_bleu'
sample from 'kbest'
filter 'uniq'
- learning rate 1
+ learning rate 0.1
gamma 0
loss margin 0
faster perceptron 1
@@ -25,9 +26,9 @@ Parameters:
l1 reg 0 'none'
pclr no
max pairs 4294967295
+ repeat 1
cdec cfg './cdec.ini'
- input './nc-wmt11.de.gz'
- refs './nc-wmt11.en.gz'
+ input './nc-wmt11.gz'
output '-'
stop_after 10
(a dot represents 10 inputs)
@@ -35,25 +36,26 @@ Iteration #1 of 3.
. 10
Stopping after 10 input sentences.
WEIGHTS
- Glue = -1100
- WordPenalty = -82.082
- LanguageModel = -3199.1
- LanguageModel_OOV = -192
- PhraseModel_0 = +3128.2
- PhraseModel_1 = -1610.2
- PhraseModel_2 = -4336.5
- PhraseModel_3 = +2910.3
- PhraseModel_4 = +2523.2
- PhraseModel_5 = +506
- PhraseModel_6 = +1467
- PassThrough = -387
+ Glue = -110
+ WordPenalty = -8.2082
+ LanguageModel = -319.91
+ LanguageModel_OOV = -19.2
+ PhraseModel_0 = +312.82
+ PhraseModel_1 = -161.02
+ PhraseModel_2 = -433.65
+ PhraseModel_3 = +291.03
+ PhraseModel_4 = +252.32
+ PhraseModel_5 = +50.6
+ PhraseModel_6 = +146.7
+ PassThrough = -38.7
---
1best avg score: 0.16966 (+0.16966)
- 1best avg model score: 2.9874e+05 (+2.9874e+05)
- avg # pairs: 906.3 (meaningless)
- avg # rank err: 906.3
+ 1best avg model score: 29874 (+29874)
+ avg # pairs: 906.3
+ avg # rank err: 0 (meaningless)
avg # margin viol: 0
- non0 feature count: 825
+ k-best loss imp: 100%
+ non0 feature count: 832
avg list sz: 91.3
avg f count: 139.77
(time 0.35 min, 2.1 s/S)
@@ -61,25 +63,26 @@ WEIGHTS
Iteration #2 of 3.
. 10
WEIGHTS
- Glue = -1221
- WordPenalty = +836.89
- LanguageModel = +2332.3
- LanguageModel_OOV = -1451
- PhraseModel_0 = +1507.2
- PhraseModel_1 = -2728.4
- PhraseModel_2 = -4183.6
- PhraseModel_3 = +1816.3
- PhraseModel_4 = -2894.7
- PhraseModel_5 = +1403
- PhraseModel_6 = +35
- PassThrough = -1097
+ Glue = -122.1
+ WordPenalty = +83.689
+ LanguageModel = +233.23
+ LanguageModel_OOV = -145.1
+ PhraseModel_0 = +150.72
+ PhraseModel_1 = -272.84
+ PhraseModel_2 = -418.36
+ PhraseModel_3 = +181.63
+ PhraseModel_4 = -289.47
+ PhraseModel_5 = +140.3
+ PhraseModel_6 = +3.5
+ PassThrough = -109.7
---
1best avg score: 0.17399 (+0.004325)
- 1best avg model score: 49369 (-2.4937e+05)
- avg # pairs: 662.4 (meaningless)
- avg # rank err: 662.4
+ 1best avg model score: 4936.9 (-24937)
+ avg # pairs: 662.4
+ avg # rank err: 0 (meaningless)
avg # margin viol: 0
- non0 feature count: 1235
+ k-best loss imp: 100%
+ non0 feature count: 1240
avg list sz: 91.3
avg f count: 125.11
(time 0.27 min, 1.6 s/S)
@@ -87,32 +90,33 @@ WEIGHTS
Iteration #3 of 3.
. 10
WEIGHTS
- Glue = -1574
- WordPenalty = -17.372
- LanguageModel = +6861.8
- LanguageModel_OOV = -3997
- PhraseModel_0 = -398.76
- PhraseModel_1 = -3419.6
- PhraseModel_2 = -3186.7
- PhraseModel_3 = +1050.8
- PhraseModel_4 = -2902.7
- PhraseModel_5 = -486
- PhraseModel_6 = -436
- PassThrough = -2985
+ Glue = -157.4
+ WordPenalty = -1.7372
+ LanguageModel = +686.18
+ LanguageModel_OOV = -399.7
+ PhraseModel_0 = -39.876
+ PhraseModel_1 = -341.96
+ PhraseModel_2 = -318.67
+ PhraseModel_3 = +105.08
+ PhraseModel_4 = -290.27
+ PhraseModel_5 = -48.6
+ PhraseModel_6 = -43.6
+ PassThrough = -298.5
---
1best avg score: 0.30742 (+0.13343)
- 1best avg model score: -1.5393e+05 (-2.0329e+05)
- avg # pairs: 623.8 (meaningless)
- avg # rank err: 623.8
+ 1best avg model score: -15393 (-20329)
+ avg # pairs: 623.8
+ avg # rank err: 0 (meaningless)
avg # margin viol: 0
- non0 feature count: 1770
+ k-best loss imp: 100%
+ non0 feature count: 1776
avg list sz: 91.3
avg f count: 118.58
-(time 0.25 min, 1.5 s/S)
+(time 0.28 min, 1.7 s/S)
Writing weights file to '-' ...
done
---
Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.30742].
-This took 0.86667 min.
+This took 0.9 min.