| author    | Chris Dyer <redpony@gmail.com>                             | 2012-11-05 10:25:54 -0800 |
|-----------|------------------------------------------------------------|---------------------------|
| committer | Chris Dyer <redpony@gmail.com>                             | 2012-11-05 10:25:54 -0800 |
| commit    | 2eb77b77d28f6d9dc88ecb6ca999994bdb555445 (patch)           |                           |
| tree      | 1e19af403041bd86933ce88fa1a70030ab100ca2 /dtrain/dtrain.cc |                           |
| parent    | c615c37501fa8576584a510a9d2bfe2fdd5bace7 (diff)            |                           |
| parent    | 0c54220adfaada6ad1e2d54f31a9895da35127fd (diff)            |                           |
Merge pull request #10 from pks/master
renamed RuleNgramFeatures
Diffstat (limited to 'dtrain/dtrain.cc')
-rw-r--r-- | dtrain/dtrain.cc | 4 |
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/dtrain/dtrain.cc b/dtrain/dtrain.cc
index b7a4bb6f..18286668 100644
--- a/dtrain/dtrain.cc
+++ b/dtrain/dtrain.cc
@@ -24,13 +24,13 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
     ("pair_threshold", po::value<score_t>()->default_value(0.), "bleu [0,1] threshold to filter pairs")
     ("N", po::value<unsigned>()->default_value(4), "N for Ngrams (BLEU)")
     ("scorer", po::value<string>()->default_value("stupid_bleu"), "scoring: bleu, stupid_, smooth_, approx_, lc_")
-    ("learning_rate", po::value<weight_t>()->default_value(0.0001), "learning rate")
+    ("learning_rate", po::value<weight_t>()->default_value(1.0), "learning rate")
     ("gamma", po::value<weight_t>()->default_value(0.), "gamma for SVM (0 for perceptron)")
     ("select_weights", po::value<string>()->default_value("last"), "output best, last, avg weights ('VOID' to throw away)")
     ("rescale", po::value<bool>()->zero_tokens(), "rescale weight vector after each input")
     ("l1_reg", po::value<string>()->default_value("none"), "apply l1 regularization as in 'Tsuroka et al' (2010)")
     ("l1_reg_strength", po::value<weight_t>(), "l1 regularization strength")
-    ("fselect", po::value<weight_t>()->default_value(-1), "select top x percent (or by threshold) of features after each epoch NOT IMPL") // TODO
+    ("fselect", po::value<weight_t>()->default_value(-1), "select top x percent (or by threshold) of features after each epoch NOT IMPLEMENTED") // TODO
     ("approx_bleu_d", po::value<score_t>()->default_value(0.9), "discount for approx. BLEU")
     ("scale_bleu_diff", po::value<bool>()->zero_tokens(), "learning rate <- bleu diff of a misranked pair")
     ("loss_margin", po::value<weight_t>()->default_value(0.), "update if no error in pref pair but model scores this near")
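For context, the lines touched here are Boost.Program_options declarations inside dtrain's option setup. The sketch below is not dtrain itself; it is a minimal, self-contained illustration of how a changed `default_value()` (here `learning_rate` going from 0.0001 to 1.0) and a `zero_tokens()` flag behave. The `main()` wrapper and the `weight_t` typedef as plain `double` are assumptions made only for this example.

    // sketch.cc -- minimal Boost.Program_options example mirroring the diff above
    #include <boost/program_options.hpp>
    #include <iostream>

    namespace po = boost::program_options;
    typedef double weight_t; // assumption: dtrain's weight_t behaves like a double here

    int main(int argc, char** argv)
    {
      po::options_description opts("dtrain-like options (sketch)");
      opts.add_options()
        // after this commit the default is 1.0 instead of 0.0001
        ("learning_rate", po::value<weight_t>()->default_value(1.0), "learning rate")
        // zero_tokens(): flag-style option, present or absent, takes no argument
        ("rescale", po::value<bool>()->zero_tokens(), "rescale weight vector after each input");

      po::variables_map cfg;
      po::store(po::parse_command_line(argc, argv, opts), cfg);
      po::notify(cfg);

      // default_value() guarantees the key is present even if the flag was never given
      std::cout << "learning_rate = " << cfg["learning_rate"].as<weight_t>() << std::endl;
      std::cout << "rescale given: " << (cfg.count("rescale") ? "yes" : "no") << std::endl;
      return 0;
    }

Run without arguments this prints the new default (`learning_rate = 1`); passing `--learning_rate 0.0001` restores the old behavior, which is why the change only affects users who relied on the implicit default.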