path: root/dtrain/dtrain.cc
author     Chris Dyer <cdyer@cs.cmu.edu>  2012-11-05 21:34:22 -0500
committer  Chris Dyer <cdyer@cs.cmu.edu>  2012-11-05 21:34:22 -0500
commit     709bf16fbf7e88d15c9a9f3356c63e9ff38fa05d (patch)
tree       3ec67e4084b6be65555d70766ebfe5d2b18cd410 /dtrain/dtrain.cc
parent     782fb27af98ed98256cc25c832131c59c8e9ce9c (diff)
parent     2eb77b77d28f6d9dc88ecb6ca999994bdb555445 (diff)
Merge branch 'master' of https://github.com/redpony/cdec
Diffstat (limited to 'dtrain/dtrain.cc')
-rw-r--r--  dtrain/dtrain.cc  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/dtrain/dtrain.cc b/dtrain/dtrain.cc
index b7a4bb6f..18286668 100644
--- a/dtrain/dtrain.cc
+++ b/dtrain/dtrain.cc
@@ -24,13 +24,13 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
("pair_threshold", po::value<score_t>()->default_value(0.), "bleu [0,1] threshold to filter pairs")
("N", po::value<unsigned>()->default_value(4), "N for Ngrams (BLEU)")
("scorer", po::value<string>()->default_value("stupid_bleu"), "scoring: bleu, stupid_, smooth_, approx_, lc_")
- ("learning_rate", po::value<weight_t>()->default_value(0.0001), "learning rate")
+ ("learning_rate", po::value<weight_t>()->default_value(1.0), "learning rate")
("gamma", po::value<weight_t>()->default_value(0.), "gamma for SVM (0 for perceptron)")
("select_weights", po::value<string>()->default_value("last"), "output best, last, avg weights ('VOID' to throw away)")
("rescale", po::value<bool>()->zero_tokens(), "rescale weight vector after each input")
("l1_reg", po::value<string>()->default_value("none"), "apply l1 regularization as in 'Tsuroka et al' (2010)")
("l1_reg_strength", po::value<weight_t>(), "l1 regularization strength")
- ("fselect", po::value<weight_t>()->default_value(-1), "select top x percent (or by threshold) of features after each epoch NOT IMPL") // TODO
+ ("fselect", po::value<weight_t>()->default_value(-1), "select top x percent (or by threshold) of features after each epoch NOT IMPLEMENTED") // TODO
("approx_bleu_d", po::value<score_t>()->default_value(0.9), "discount for approx. BLEU")
("scale_bleu_diff", po::value<bool>()->zero_tokens(), "learning rate <- bleu diff of a misranked pair")
("loss_margin", po::value<weight_t>()->default_value(0.), "update if no error in pref pair but model scores this near")