commit f3f8dbaec0c91d90fc2e9fdec988081659a7c48c (patch)
tree   37984cba680c1dbaf516968920b81533df0d8820 /training
parent fde257636aa3a2fa04f89829176a345e15664565 (diff)
parent 7a9c1c85fecb787b1ee4b8e9552ed35a635e3c39 (diff)
author    Chris Dyer <redpony@gmail.com>  2014-01-15 20:33:07 -0500
committer Chris Dyer <redpony@gmail.com>  2014-01-15 20:33:07 -0500
Merge branch 'master' of https://github.com/redpony/cdec
Diffstat (limited to 'training')
 training/crf/mpi_adagrad_optimize.cc              |  14
 training/dtrain/dtrain.cc                         |  32
 training/dtrain/dtrain.h                          |   2
 training/dtrain/examples/standard/cdec.ini        |   1
 training/dtrain/examples/standard/dtrain.ini      |   2
 training/dtrain/examples/standard/expected-output | 116
 training/dtrain/pairsampling.h                    |   1
 training/dtrain/score.cc                          |  18
 training/dtrain/score.h                           |  18
 9 files changed, 109 insertions(+), 95 deletions(-)
diff --git a/training/crf/mpi_adagrad_optimize.cc b/training/crf/mpi_adagrad_optimize.cc
index 39bd763e..bac57324 100644
--- a/training/crf/mpi_adagrad_optimize.cc
+++ b/training/crf/mpi_adagrad_optimize.cc
@@ -157,11 +157,11 @@ struct TrainingObserver : public DecoderObserver {
   void GetGradient(SparseVector<double>* g) const {
     g->clear();
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& gi : acc_grad) {
 #else
     for (FastSparseVector<prob_t>::const_iterator it = acc_grad.begin(); it != acc_grad.end(); ++it) {
-      pair<unsigned, double>& gi = *it;
+      const pair<unsigned, prob_t>& gi = *it;
 #endif
       g->set_value(gi.first, -gi.second.as_float());
     }
@@ -190,7 +190,7 @@ class AdaGradOptimizer {
     G() {}
   void update(const SparseVector<double>& g, vector<double>* x, SparseVector<double>* sx) {
     if (x->size() > G.size()) G.resize(x->size(), 0.0);
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& gi : g) {
 #else
     for (SparseVector<double>::const_iterator it = g.begin(); it != g.end(); ++it) {
@@ -220,7 +220,7 @@ class AdaGradL1Optimizer {
       G.resize(x->size(), 0.0);
       u.resize(x->size(), 0.0);
     }
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& gi : g) {
 #else
     for (SparseVector<double>::const_iterator it = g.begin(); it != g.end(); ++it) {
@@ -236,11 +236,11 @@ class AdaGradL1Optimizer {
     // compute updates (avoid invalidating iterators by putting them all
     // in the vector vupdate and applying them after this)
     vector<pair<unsigned, double>> vupdate;
-#if HAVE_CXX11
+#if HAVE_CXX11 && (__GNUC_MINOR__ > 4 || __GNUC__ > 4)
     for (auto& xi : *sx) {
 #else
-    for (SparseVector<double>::const_iterator it = sx->begin(); it != sx->end(); ++it) {
-      const pair<unsigned,double>& gi = *it;
+    for (SparseVector<double>::iterator it = sx->begin(); it != sx->end(); ++it) {
+      const pair<unsigned,double>& xi = *it;
 #endif
       double z = fabs(u[xi.first] / t) - lambda;
       double s = 1;
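The hunks above gate the C++11 range-based for loops behind a compiler-version check (older GCCs keep the iterator fallback) and fix the element types in those fallback loops. For reference, this is the per-coordinate AdaGrad step the optimizer classes implement, as a dense stand-alone sketch; the names (eta, G, g, x) mirror the patch but this is an illustration, not the cdec API:

#include <cmath>
#include <vector>

// AdaGrad: accumulate squared gradients per coordinate in G, then scale
// each coordinate's step by eta / sqrt(G_i), so frequently-updated
// coordinates get smaller steps over time.
void adagrad_update(std::vector<double>& x,        // parameters
                    std::vector<double>& G,        // running sum of g_i^2
                    const std::vector<double>& g,  // current gradient
                    double eta) {                  // base learning rate
  if (G.size() < x.size()) G.resize(x.size(), 0.0);
  for (std::size_t i = 0; i < g.size(); ++i) {
    if (g[i] == 0.0) continue;                     // gradients are sparse: skip zeros
    G[i] += g[i] * g[i];
    x[i] -= eta / std::sqrt(G[i]) * g[i];          // per-coordinate learning rate
  }
}

AdaGradL1Optimizer additionally keeps a running sum u of raw gradients and truncates weights toward zero by the accumulated L1 penalty (the z = fabs(u[xi.first] / t) - lambda line above), in the style of cumulative-penalty L1 regularization.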
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 0a27a068..b01cf421 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -44,7 +44,7 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
     ("pclr",        po::value<string>()->default_value("no"),  "use a (simple|adagrad) per-coordinate learning rate")
     ("batch",       po::value<bool>()->zero_tokens(),          "do batch optimization")
     ("repeat",      po::value<unsigned>()->default_value(1),   "repeat optimization over kbest list this number of times")
-    //("test-k-best", po::value<bool>()->zero_tokens(),          "check if optimization works (use repeat >= 2)")
+    ("check",       po::value<bool>()->zero_tokens(),          "produce list of loss differentials")
     ("noup",        po::value<bool>()->zero_tokens(),          "do not update weights");
   po::options_description cl("Command Line Options");
   cl.add_options()
@@ -130,8 +130,8 @@ main(int argc, char** argv)
   const score_t approx_bleu_d = cfg["approx_bleu_d"].as<score_t>();
   const unsigned max_pairs = cfg["max_pairs"].as<unsigned>();
   int repeat = cfg["repeat"].as<unsigned>();
-  //bool test_k_best = false;
-  //if (cfg.count("test-k-best")) test_k_best = true;
+  bool check = false;
+  if (cfg.count("check")) check = true;
   weight_t loss_margin = cfg["loss_margin"].as<weight_t>();
   bool batch = false;
   if (cfg.count("batch")) batch = true;
@@ -412,27 +412,38 @@ main(int argc, char** argv)
       int cur_npairs = pairs.size();
       npairs += cur_npairs;

-      score_t kbest_loss_first, kbest_loss_last = 0.0;
+      score_t kbest_loss_first = 0.0, kbest_loss_last = 0.0;
+
+      if (check) repeat = 2;
+      vector<float> losses; // for check

       for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin(); it != pairs.end(); it++) {
         score_t model_diff = it->first.model - it->second.model;
-        kbest_loss_first += max(0.0, -1.0 * model_diff);
+        score_t loss = max(0.0, -1.0 * model_diff);
+        losses.push_back(loss);
+        kbest_loss_first += loss;
       }

+      score_t kbest_loss = 0.0;
       for (int ki=0; ki < repeat; ki++) {

-        score_t kbest_loss = 0.0; // test-k-best
         SparseVector<weight_t> lambdas_copy; // for l1 regularization
         SparseVector<weight_t> sum_up; // for pclr
         if (l1naive||l1clip||l1cumul) lambdas_copy = lambdas;

+        unsigned pair_idx = 0; // for check
         for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin(); it != pairs.end(); it++) {
           score_t model_diff = it->first.model - it->second.model;
+          score_t loss = max(0.0, -1.0 * model_diff);
+
+          if (check && ki == 1) cout << losses[pair_idx] - loss << endl;
+          pair_idx++;
+
           if (repeat > 1) {
             model_diff = lambdas.dot(it->first.f) - lambdas.dot(it->second.f);
-            kbest_loss += max(0.0, -1.0 * model_diff);
+            kbest_loss += loss;
           }
           bool rank_error = false;
           score_t margin;
@@ -449,7 +460,7 @@ main(int argc, char** argv)
           if (rank_error || margin < loss_margin) {
             SparseVector<weight_t> diff_vec = it->first.f - it->second.f;
             if (batch) {
-              batch_loss += max(0., -1.0*model_diff);
+              batch_loss += max(0., -1.0 * model_diff);
               batch_updates += diff_vec;
               continue;
             }
@@ -529,9 +540,8 @@ main(int argc, char** argv)
         if (ki==repeat-1) { // done
           kbest_loss_last = kbest_loss;
           if (repeat > 1) {
-            score_t best_score = -1.;
             score_t best_model = -std::numeric_limits<score_t>::max();
-            unsigned best_idx;
+            unsigned best_idx = 0;
             for (unsigned i=0; i < samples->size(); i++) {
               score_t s = lambdas.dot((*samples)[i].f);
               if (s > best_model) {
@@ -634,6 +644,8 @@ main(int argc, char** argv)
     Weights::WriteToFile(w_fn, decoder_weights, true);
   }

+  if (check) cout << "---" << endl;
+
 } // outer loop

 if (average) w_average /= (weight_t)T;
diff --git a/training/dtrain/dtrain.h b/training/dtrain/dtrain.h
index ccb5ad4d..eb23b813 100644
--- a/training/dtrain/dtrain.h
+++ b/training/dtrain/dtrain.h
@@ -64,7 +64,7 @@ struct LocalScorer
   vector<score_t> w_;

   virtual score_t
-  Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned src_len)=0;
+  Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned rank, const unsigned src_len)=0;

   virtual void Reset() {} // only for ApproxBleuScorer, LinearBleuScorer
diff --git a/training/dtrain/examples/standard/cdec.ini b/training/dtrain/examples/standard/cdec.ini
index e1edc68d..6cba9e1e 100644
--- a/training/dtrain/examples/standard/cdec.ini
+++ b/training/dtrain/examples/standard/cdec.ini
@@ -21,6 +21,7 @@ feature_function=RuleIdentityFeatures
 feature_function=RuleSourceBigramFeatures
 feature_function=RuleTargetBigramFeatures
 feature_function=RuleShape
+feature_function=RuleWordAlignmentFeatures
 #feature_function=SourceSpanSizeFeatures
 #feature_function=SourceWordPenalty
 #feature_function=SpanFeatures
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index fc83f08e..a515db02 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -2,7 +2,7 @@
 #refs=./nc-wmt11.en.gz
 bitext=./nc-wmt11.gz
 output=-                  # a weights file (add .gz for gzip compression) or STDOUT '-'
-select_weights=VOID       # output average (over epochs) weight vector
+select_weights=avg        # output average (over epochs) weight vector
 decoder_config=./cdec.ini # config for cdec
 # weights for these features will be printed on each iteration
 print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
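The new --check flag replaces the commented-out test-k-best option: check mode forces two passes (repeat = 2) over the same k-best pairs and, on the second pass, prints one differential per pair — the pairwise hinge loss before the first pass's updates minus the loss under the updated weights — so positive values mean the update reduced that pair's loss. A stand-alone sketch of the quantity being printed (pair_loss and the toy scores below are illustrative, not dtrain's API):

#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

// Pairwise hinge loss as in the patch: zero when the preferred hypothesis
// already outscores the other, otherwise the (positive) score gap.
double pair_loss(double model_first, double model_second) {
  return std::max(0.0, -(model_first - model_second));
}

int main() {
  // (first, second) model scores for three pairs, before and after an update.
  std::vector<std::pair<double, double> > before = {{1.0, 2.5}, {0.3, 0.1}, {-1.0, 0.5}};
  std::vector<std::pair<double, double> > after  = {{2.0, 1.5}, {0.4, 0.1}, {-0.2, 0.4}};
  for (std::size_t i = 0; i < before.size(); ++i)
    std::cout << pair_loss(before[i].first, before[i].second)
                 - pair_loss(after[i].first, after[i].second)
              << std::endl; // cf. 'losses[pair_idx] - loss' in the patch
}

This prints 1.5, 0 and 0.9 for the three toy pairs: the first and third pairs' losses shrank, while the second was already ranked correctly.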
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index 75f47337..fa831221 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,7 +4,7 @@ Reading ./nc-wmt11.en.srilm.gz
 ----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
 ****************************************************************************************************
 Example feature: Shape_S00000_T00000
-Seeding random number sequence to 3751911392
+Seeding random number sequence to 4138446869

 dtrain
 Parameters:
@@ -22,7 +22,7 @@ Parameters:
             pairs 'XYX'
             hi lo 0.1
    pair threshold 0
-   select weights 'VOID'
+   select weights 'avg'
            l1 reg 0 'none'
              pclr no
         max pairs 4294967295
@@ -36,87 +36,87 @@ Iteration #1 of 3.
 . 10
 Stopping after 10 input sentences.
 WEIGHTS
-              Glue = -110
-       WordPenalty = -8.2082
-     LanguageModel = -319.91
- LanguageModel_OOV = -19.2
-     PhraseModel_0 = +312.82
-     PhraseModel_1 = -161.02
-     PhraseModel_2 = -433.65
-     PhraseModel_3 = +291.03
-     PhraseModel_4 = +252.32
-     PhraseModel_5 = +50.6
-     PhraseModel_6 = +146.7
-       PassThrough = -38.7
+              Glue = -80.3
+       WordPenalty = -51.247
+     LanguageModel = +282.46
+ LanguageModel_OOV = -85.8
+     PhraseModel_0 = -100.06
+     PhraseModel_1 = -98.692
+     PhraseModel_2 = -9.4958
+     PhraseModel_3 = +18.535
+     PhraseModel_4 = +62.35
+     PhraseModel_5 = +7
+     PhraseModel_6 = +31.4
+       PassThrough = -126.5
 ---
-       1best avg score: 0.16966 (+0.16966)
- 1best avg model score: 29874 (+29874)
-           avg # pairs: 906.3
+       1best avg score: 0.25631 (+0.25631)
+ 1best avg model score: -4843.6 (-4843.6)
+           avg # pairs: 744.4
         avg # rank err: 0 (meaningless)
      avg # margin viol: 0
        k-best loss imp: 100%
-    non0 feature count: 832
+    non0 feature count: 1274
            avg list sz: 91.3
-           avg f count: 139.77
-(time 0.35 min, 2.1 s/S)
+           avg f count: 143.72
+(time 0.4 min, 2.4 s/S)

 Iteration #2 of 3.
 . 10
 WEIGHTS
-              Glue = -122.1
-       WordPenalty = +83.689
-     LanguageModel = +233.23
- LanguageModel_OOV = -145.1
-     PhraseModel_0 = +150.72
-     PhraseModel_1 = -272.84
-     PhraseModel_2 = -418.36
-     PhraseModel_3 = +181.63
-     PhraseModel_4 = -289.47
-     PhraseModel_5 = +140.3
-     PhraseModel_6 = +3.5
-       PassThrough = -109.7
+              Glue = -117.4
+       WordPenalty = -99.584
+     LanguageModel = +395.05
+ LanguageModel_OOV = -136.8
+     PhraseModel_0 = +40.614
+     PhraseModel_1 = -123.29
+     PhraseModel_2 = -152
+     PhraseModel_3 = -161.13
+     PhraseModel_4 = -76.379
+     PhraseModel_5 = +39.1
+     PhraseModel_6 = +137.7
+       PassThrough = -162.1
 ---
-       1best avg score: 0.17399 (+0.004325)
- 1best avg model score: 4936.9 (-24937)
-           avg # pairs: 662.4
+       1best avg score: 0.26751 (+0.011198)
+ 1best avg model score: -10061 (-5216.9)
+           avg # pairs: 639.1
         avg # rank err: 0 (meaningless)
      avg # margin viol: 0
        k-best loss imp: 100%
-    non0 feature count: 1240
+    non0 feature count: 1845
            avg list sz: 91.3
-           avg f count: 125.11
-(time 0.27 min, 1.6 s/S)
+           avg f count: 139.88
+(time 0.35 min, 2.1 s/S)

 Iteration #3 of 3.
 . 10
 WEIGHTS
-              Glue = -157.4
-       WordPenalty = -1.7372
-     LanguageModel = +686.18
- LanguageModel_OOV = -399.7
-     PhraseModel_0 = -39.876
-     PhraseModel_1 = -341.96
-     PhraseModel_2 = -318.67
-     PhraseModel_3 = +105.08
-     PhraseModel_4 = -290.27
-     PhraseModel_5 = -48.6
-     PhraseModel_6 = -43.6
-       PassThrough = -298.5
+              Glue = -101.1
+       WordPenalty = -139.97
+     LanguageModel = +327.98
+ LanguageModel_OOV = -234.7
+     PhraseModel_0 = -144.49
+     PhraseModel_1 = -263.88
+     PhraseModel_2 = -149.25
+     PhraseModel_3 = -38.805
+     PhraseModel_4 = +50.575
+     PhraseModel_5 = -52.4
+     PhraseModel_6 = +41.6
+       PassThrough = -230.2
 ---
-       1best avg score: 0.30742 (+0.13343)
- 1best avg model score: -15393 (-20329)
-           avg # pairs: 623.8
+       1best avg score: 0.36222 (+0.094717)
+ 1best avg model score: -17416 (-7355.5)
+           avg # pairs: 661.2
         avg # rank err: 0 (meaningless)
      avg # margin viol: 0
        k-best loss imp: 100%
-    non0 feature count: 1776
+    non0 feature count: 2163
            avg list sz: 91.3
-           avg f count: 118.58
-(time 0.28 min, 1.7 s/S)
+           avg f count: 132.53
+(time 0.33 min, 2 s/S)

 Writing weights file to '-' ...
 done
 ---
-Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.30742].
-This took 0.9 min.
+Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.36222].
+This took 1.0833 min.
diff --git a/training/dtrain/pairsampling.h b/training/dtrain/pairsampling.h
index 3f67e209..1a3c498c 100644
--- a/training/dtrain/pairsampling.h
+++ b/training/dtrain/pairsampling.h
@@ -112,6 +112,7 @@ _PRO_cmp_pair_by_diff_d(pair<ScoredHyp,ScoredHyp> a, pair<ScoredHyp,ScoredHyp> b
 inline void
 PROsampling(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, score_t threshold, unsigned max, bool _unused=false, float _also_unused=0)
 {
+  sort(s->begin(), s->end(), cmp_hyp_by_score_d);
   unsigned max_count = 5000, count = 0, sz = s->size();
   bool b = false;
   for (unsigned i = 0; i < sz-1; i++) {
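The one-line fix above sorts the sample best-first by gold score before pairs are formed, so the first element of each pair is always the better hypothesis. A simplified, self-contained sketch of this style of pair selection (Hyp and sample_pairs stand in for dtrain's ScoredHyp and PROsampling; the threshold and cap semantics are reduced to their essentials):

#include <algorithm>
#include <utility>
#include <vector>

struct Hyp { double score; unsigned id; };  // stand-in for ScoredHyp

// Emit hypothesis pairs whose gold-score gap exceeds a threshold, up to a
// cap; sorting first guarantees pair.first is the better hypothesis.
std::vector<std::pair<Hyp, Hyp> >
sample_pairs(std::vector<Hyp> hyps, double threshold, std::size_t max_count) {
  std::sort(hyps.begin(), hyps.end(),
            [](const Hyp& a, const Hyp& b) { return a.score > b.score; });
  std::vector<std::pair<Hyp, Hyp> > pairs;
  for (std::size_t i = 0; i + 1 < hyps.size(); ++i)
    for (std::size_t j = i + 1; j < hyps.size(); ++j) {
      if (pairs.size() == max_count) return pairs;
      if (hyps[i].score - hyps[j].score > threshold)
        pairs.emplace_back(hyps[i], hyps[j]);
    }
  return pairs;
}

Without the sort, a pair pulled from an unsorted k-best list can have its lower-scoring hypothesis in first position, which silently flips the sign of the pairwise loss.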
diff --git a/training/dtrain/score.cc b/training/dtrain/score.cc
index 96d6e10a..127f34d2 100644
--- a/training/dtrain/score.cc
+++ b/training/dtrain/score.cc
@@ -32,7 +32,7 @@ BleuScorer::Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref
 }

 score_t
-BleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
+BleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
                   const unsigned /*rank*/, const unsigned /*src_len*/)
 {
   unsigned hyp_len = hyp.size(), ref_len = ref.size();
@@ -52,7 +52,7 @@ BleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
  * NOTE: 0 iff no 1gram match ('grounded')
  */
 score_t
-StupidBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
+StupidBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
                         const unsigned /*rank*/, const unsigned /*src_len*/)
 {
   unsigned hyp_len = hyp.size(), ref_len = ref.size();
@@ -81,7 +81,7 @@ StupidBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
  * (Nakov et al. '12)
  */
 score_t
-FixedStupidBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
+FixedStupidBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
                              const unsigned /*rank*/, const unsigned /*src_len*/)
 {
   unsigned hyp_len = hyp.size(), ref_len = ref.size();
@@ -112,7 +112,7 @@ FixedStupidBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
  * NOTE: max is 0.9375 (with N=4)
  */
 score_t
-SmoothBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
+SmoothBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
                         const unsigned /*rank*/, const unsigned /*src_len*/)
 {
   unsigned hyp_len = hyp.size(), ref_len = ref.size();
@@ -143,7 +143,7 @@ SmoothBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
  * sum up Ngram precisions
  */
 score_t
-SumBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
+SumBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
                      const unsigned /*rank*/, const unsigned /*src_len*/)
 {
   unsigned hyp_len = hyp.size(), ref_len = ref.size();
@@ -167,7 +167,7 @@ SumBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
  * sum up exp(Ngram precisions)
  */
 score_t
-SumExpBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
+SumExpBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
                         const unsigned /*rank*/, const unsigned /*src_len*/)
 {
   unsigned hyp_len = hyp.size(), ref_len = ref.size();
@@ -191,7 +191,7 @@ SumExpBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
  * sum up exp(weight * log(Ngram precisions))
  */
 score_t
-SumWhateverBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
+SumWhateverBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
                              const unsigned /*rank*/, const unsigned /*src_len*/)
 {
   unsigned hyp_len = hyp.size(), ref_len = ref.size();
@@ -224,7 +224,7 @@ SumWhateverBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
  * No scaling by src len.
  */
 score_t
-ApproxBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
+ApproxBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
                         const unsigned rank, const unsigned src_len)
 {
   unsigned hyp_len = hyp.size(), ref_len = ref.size();
@@ -255,7 +255,7 @@ ApproxBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
  *
 */
 score_t
-LinearBleuScorer::Score(vector<WordID>& hyp, vector<WordID>& ref,
+LinearBleuScorer::Score(const vector<WordID>& hyp, const vector<WordID>& ref,
                         const unsigned rank, const unsigned /*src_len*/)
 {
   unsigned hyp_len = hyp.size(), ref_len = ref.size();
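These Score implementations all compute per-sentence BLEU variants over precomputed n-gram counts and differ mainly in how they smooth the n-gram precisions. A sketch of the general shape of the "stupid" variant, with add-one smoothing for every order except unigrams so the score stays grounded at 0 when no unigram matches (cf. the NOTE above); clipped[n] and total[n] are assumed to hold clipped matches and hypothesis n-gram counts per order, roughly what make_ngram_counts produces in the real code:

#include <cmath>
#include <vector>

// Per-sentence BLEU with add-one ("stupid") smoothing for n > 1 and the
// standard brevity penalty. Illustrative shape only, not the exact cdec code.
double stupid_bleu(const std::vector<double>& clipped,  // clipped n-gram matches
                   const std::vector<double>& total,    // hypothesis n-gram counts
                   unsigned hyp_len, unsigned ref_len, unsigned N = 4) {
  if (hyp_len == 0 || ref_len == 0) return 0.0;
  double sum = 0.0;
  for (unsigned n = 0; n < N; ++n) {
    double add = (n == 0) ? 0.0 : 1.0;  // no smoothing for unigrams: log(0) grounds the score
    sum += (1.0 / N) * std::log((clipped[n] + add) / (total[n] + add));
  }
  double bp = (hyp_len < ref_len)       // brevity penalty
            ? std::exp(1.0 - (double)ref_len / hyp_len) : 1.0;
  return bp * std::exp(sum);
}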
diff --git a/training/dtrain/score.h b/training/dtrain/score.h
index 53e970ba..1cdd3fa9 100644
--- a/training/dtrain/score.h
+++ b/training/dtrain/score.h
@@ -138,43 +138,43 @@ make_ngram_counts(const vector<WordID>& hyp, const vector<WordID>& ref, const un
 struct BleuScorer : public LocalScorer
 {
   score_t Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len);
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+  score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
   void Reset() {}
 };

 struct StupidBleuScorer : public LocalScorer
 {
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+  score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
   void Reset() {}
 };

 struct FixedStupidBleuScorer : public LocalScorer
 {
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+  score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
   void Reset() {}
 };

 struct SmoothBleuScorer : public LocalScorer
 {
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+  score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
   void Reset() {}
 };

 struct SumBleuScorer : public LocalScorer
 {
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+  score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
   void Reset() {}
 };

 struct SumExpBleuScorer : public LocalScorer
 {
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+  score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
   void Reset() {}
 };

 struct SumWhateverBleuScorer : public LocalScorer
 {
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+  score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
   void Reset() {};
 };
@@ -194,7 +194,7 @@ struct ApproxBleuScorer : public BleuScorer
     glob_hyp_len_ = glob_ref_len_ = glob_src_len_ = 0.;
   }

-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned src_len);
+  score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned rank, const unsigned src_len);
 };

 struct LinearBleuScorer : public BleuScorer
@@ -207,7 +207,7 @@ struct LinearBleuScorer : public BleuScorer
     onebest_counts_.One();
   }
-  score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned /*src_len*/);
+  score_t Score(const vector<WordID>& hyp, const vector<WordID>& ref, const unsigned rank, const unsigned /*src_len*/);

   inline void Reset() {
     onebest_len_ = 1;
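One practical consequence of the const-qualified signatures: a scorer subclass must now match them exactly, or its Score silently stops overriding the pure virtual in LocalScorer. A minimal illustration with stand-in types (ExactMatchScorer is hypothetical and not part of dtrain; in the repo you would include dtrain.h instead of redefining these):

#include <vector>

// Minimal stand-ins for dtrain's types, just to show the interface shape.
typedef double score_t;
typedef int WordID;

struct LocalScorer {
  virtual score_t Score(const std::vector<WordID>& hyp, const std::vector<WordID>& ref,
                        const unsigned rank, const unsigned src_len) = 0;
  virtual void Reset() {}
  virtual ~LocalScorer() {}
};

// Hypothetical scorer: exact match scores 1, anything else 0. The const
// qualifiers must match the base exactly for the override to bind.
struct ExactMatchScorer : public LocalScorer {
  score_t Score(const std::vector<WordID>& hyp, const std::vector<WordID>& ref,
                const unsigned /*rank*/, const unsigned /*src_len*/) {
    return hyp == ref ? 1.0 : 0.0;
  }
  void Reset() {}
};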