From fe471bb707226052551d75b043295ca5f57261c0 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Sun, 25 Sep 2011 20:23:09 +0200
Subject: removed some quirks, less boost, prettier code, score_t
---
dtrain/README | 11 +-
dtrain/dtrain.cc | 390 +++++++++++++++----------------
dtrain/kbestget.h | 84 ++++---
dtrain/ksampler.h | 24 +-
dtrain/pairsampling.h | 12 +-
dtrain/score.cc | 42 ++--
dtrain/score.h | 27 +--
dtrain/test/cdec_toy/cdec.ini | 1 +
dtrain/test/example/cdec.ini | 7 +
dtrain/test/example/dtrain.ini | 11 +
dtrain/test/example/nc-1k-tabs.gz | Bin 0 -> 21185883 bytes
dtrain/test/example/nc-1k.gz | Bin 0 -> 21474865 bytes
dtrain/test/example/nc-wmt11.en.srilm.gz | Bin 0 -> 16017291 bytes
dtrain/test/example/weights.gz | Bin 0 -> 255 bytes
dtrain/test/log_reg_dyer/bin_class.cc | 4 +
dtrain/test/log_reg_dyer/bin_class.h | 22 ++
dtrain/test/log_reg_dyer/log_reg.cc | 39 ++++
dtrain/test/log_reg_dyer/log_reg.h | 14 ++
dtrain/test/logreg/bin_class.cc | 4 -
dtrain/test/logreg/bin_class.h | 22 --
dtrain/test/logreg/log_reg.cc | 39 ----
dtrain/test/logreg/log_reg.h | 14 --
dtrain/test/toy_example/dtrain.ini | 2 +-
23 files changed, 377 insertions(+), 392 deletions(-)
create mode 100644 dtrain/test/example/cdec.ini
create mode 100644 dtrain/test/example/dtrain.ini
create mode 100644 dtrain/test/example/nc-1k-tabs.gz
create mode 100644 dtrain/test/example/nc-1k.gz
create mode 100644 dtrain/test/example/nc-wmt11.en.srilm.gz
create mode 100644 dtrain/test/example/weights.gz
create mode 100644 dtrain/test/log_reg_dyer/bin_class.cc
create mode 100644 dtrain/test/log_reg_dyer/bin_class.h
create mode 100644 dtrain/test/log_reg_dyer/log_reg.cc
create mode 100644 dtrain/test/log_reg_dyer/log_reg.h
delete mode 100644 dtrain/test/logreg/bin_class.cc
delete mode 100644 dtrain/test/logreg/bin_class.h
delete mode 100644 dtrain/test/logreg/log_reg.cc
delete mode 100644 dtrain/test/logreg/log_reg.h
(limited to 'dtrain')
diff --git a/dtrain/README b/dtrain/README
index 137c1b48..42b91b9b 100644
--- a/dtrain/README
+++ b/dtrain/README
@@ -1,13 +1,4 @@
-NOTES
- learner gets all used features (binary! and dense (logprob is sum of logprobs!))
- weights: see decoder/decoder.cc line 548
- (40k sents, k=100 = ~400M mem, 1 iteration 45min)?
- utils/weights.cc: why wv_?
- FD, Weights::wv_ grow too large, see utils/weights.cc;
- decoder/hg.h; decoder/scfg_translator.cc; utils/fdict.cc
-
TODO
- enable kbest FILTERING (nofiler vs unique)
MULTIPARTITE ranking (108010, 1 vs all, cluster modelscore;score)
what about RESCORING?
REMEMBER kbest (merge) weights?
@@ -30,7 +21,7 @@ TODO
non deterministic, high variance, RANDOM RESTARTS
use separate TEST SET
-KNOWN BUGS PROBLEMS
+KNOWN BUGS, PROBLEMS
doesn't select best iteration for weigts
if size of candidate < N => 0 score
cdec kbest vs 1best (no -k param), rescoring? => ok(?)
diff --git a/dtrain/dtrain.cc b/dtrain/dtrain.cc
index 76fdb49c..a70ca2f1 100644
--- a/dtrain/dtrain.cc
+++ b/dtrain/dtrain.cc
@@ -4,64 +4,66 @@
bool
dtrain_init(int argc, char** argv, po::variables_map* cfg)
{
- po::options_description conff("Configuration File Options");
- conff.add_options()
- ("decoder_config", po::value(), "configuration file for cdec")
- ("kbest", po::value()->default_value(100), "k for kbest")
- ("ngrams", po::value()->default_value(3), "N for Ngrams")
- ("filter", po::value()->default_value("unique"), "filter kbest list")
- ("epochs", po::value()->default_value(2), "# of iterations T")
- ("input", po::value()->default_value("-"), "input file")
- ("output", po::value()->default_value("-"), "output weights file")
- ("scorer", po::value()->default_value("stupid_bleu"), "scoring metric")
- ("stop_after", po::value()->default_value(0), "stop after X input sentences")
- ("input_weights", po::value(), "input weights file (e.g. from previous iteration)")
- ("wprint", po::value(), "weights to print on each iteration")
- ("hstreaming", po::value()->zero_tokens(), "run in hadoop streaming mode")
- ("noup", po::value()->zero_tokens(), "do not update weights");
-
- po::options_description clo("Command Line Options");
- clo.add_options()
+ po::options_description ini("Configuration File Options");
+ ini.add_options()
+ ("input", po::value()->default_value("-"), "input file")
+ ("output", po::value()->default_value("-"), "output weights file (or VOID)")
+ ("input_weights", po::value(), "input weights file (e.g. from previous iteration)")
+ ("decoder_config", po::value(), "configuration file for cdec")
+ ("ksamples", po::value()->default_value(100), "size of kbest or sample from forest")
+ ("sample_from", po::value()->default_value("kbest"), "where to get translations from")
+ ("filter", po::value()->default_value("unique"), "filter kbest list")
+ ("pair_sampling", po::value()->default_value("all"), "how to sample pairs: all, rand")
+ ("ngrams", po::value()->default_value(3), "N for Ngrams")
+ ("epochs", po::value()->default_value(2), "# of iterations T")
+ ("scorer", po::value()->default_value("stupid_bleu"), "scoring metric")
+ ("stop_after", po::value()->default_value(0), "stop after X input sentences")
+ ("print_weights", po::value(), "weights to print on each iteration")
+ ("hstreaming", po::value()->zero_tokens(), "run in hadoop streaming mode")
+ ("learning_rate", po::value()->default_value(0.0005), "learning rate")
+ ("gamma", po::value()->default_value(0.), "gamma for SVM (0 for perceptron)")
+ ("noup", po::value()->zero_tokens(), "do not update weights");
+ po::options_description cl("Command Line Options");
+ cl.add_options()
("config,c", po::value(), "dtrain config file")
("quiet,q", po::value()->zero_tokens(), "be quiet")
("verbose,v", po::value()->zero_tokens(), "be verbose");
- po::options_description config_options, cmdline_options;
-
- config_options.add(conff);
- cmdline_options.add(clo);
- cmdline_options.add(conff);
-
- po::store(parse_command_line(argc, argv, cmdline_options), *cfg);
+ cl.add(ini);
+ po::store(parse_command_line(argc, argv, cl), *cfg);
if (cfg->count("config")) {
- ifstream config((*cfg)["config"].as().c_str());
- po::store(po::parse_config_file(config, config_options), *cfg);
+ ifstream ini_f((*cfg)["config"].as().c_str());
+ po::store(po::parse_config_file(ini_f, ini), *cfg);
}
po::notify(*cfg);
-
if (!cfg->count("decoder_config")) {
- cerr << cmdline_options << endl;
+ cerr << cl << endl;
return false;
}
if (cfg->count("hstreaming") && (*cfg)["output"].as() != "-") {
cerr << "When using 'hstreaming' the 'output' param should be '-'.";
return false;
}
- if (cfg->count("filter") && (*cfg)["filter"].as() != "unique"
+ if ((*cfg)["filter"].as() != "unique"
&& (*cfg)["filter"].as() != "no") {
- cerr << "Wrong 'filter' type: '" << (*cfg)["filter"].as() << "'." << endl;
+ cerr << "Wrong 'filter' param: '" << (*cfg)["filter"].as() << "', use 'unique' or 'no'." << endl;
+ }
+ if ((*cfg)["pair_sampling"].as() != "all"
+ && (*cfg)["pair_sampling"].as() != "rand") {
+ cerr << "Wrong 'pair_sampling' param: '" << (*cfg)["pair_sampling"].as() << "', use 'all' or 'rand'." << endl;
+ }
+ if ((*cfg)["sample_from"].as() != "kbest"
+ && (*cfg)["sample_from"].as() != "forest") {
+ cerr << "Wrong 'sample_from' param: '" << (*cfg)["sample_from"].as() << "', use 'kbest' or 'forest'." << endl;
}
return true;
}
-#include "filelib.h"
-
int
main(int argc, char** argv)
{
- cout << _p5;
// handle most parameters
po::variables_map cfg;
- if (! dtrain_init(argc, argv, &cfg)) exit(1); // something is wrong
+ if (!dtrain_init(argc, argv, &cfg)) exit(1); // something is wrong
bool quiet = false;
if (cfg.count("quiet")) quiet = true;
bool verbose = false;
@@ -73,43 +75,37 @@ main(int argc, char** argv)
hstreaming = true;
quiet = true;
}
- const size_t k = cfg["kbest"].as();
+ const size_t k = cfg["ksamples"].as();
const size_t N = cfg["ngrams"].as();
const size_t T = cfg["epochs"].as();
const size_t stop_after = cfg["stop_after"].as();
const string filter_type = cfg["filter"].as();
- if (!quiet) {
- cout << endl << "dtrain" << endl << "Parameters:" << endl;
- cout << setw(25) << "k " << k << endl;
- cout << setw(25) << "N " << N << endl;
- cout << setw(25) << "T " << T << endl;
- if (cfg.count("stop-after"))
- cout << setw(25) << "stop_after " << stop_after << endl;
- if (cfg.count("input_weights"))
- cout << setw(25) << "weights " << cfg["weights"].as() << endl;
- cout << setw(25) << "input " << "'" << cfg["input"].as() << "'" << endl;
- cout << setw(25) << "filter " << "'" << filter_type << "'" << endl;
- }
-
- vector wprint;
- if (cfg.count("wprint")) {
- boost::split(wprint, cfg["wprint"].as(), boost::is_any_of(" "));
- }
+ const string sample_from = cfg["sample_from"].as();
+ const string pair_sampling = cfg["pair_sampling"].as();
+ vector print_weights;
+ if (cfg.count("print_weights"))
+ boost::split(print_weights, cfg["print_weights"].as(), boost::is_any_of(" "));
- // setup decoder, observer
+ // setup decoder
register_feature_functions();
SetSilent(true);
ReadFile ini_rf(cfg["decoder_config"].as());
if (!quiet)
cout << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as() << "'" << endl;
Decoder decoder(ini_rf.stream());
- KBestGetter observer(k, filter_type);
- MT19937 rng;
- //KSampler observer(k, &rng);
+
+ MT19937 rng; // random number generator
+ // setup decoder observer
+ HypoSampler* observer;
+ if (sample_from == "kbest") {
+ observer = dynamic_cast(new KBestGetter(k, filter_type));
+ } else {
+ observer = dynamic_cast(new KSampler(k, &rng));
+ }
// scoring metric/scorer
string scorer_str = cfg["scorer"].as();
- double (*scorer)(NgramCounts&, const size_t, const size_t, size_t, vector);
+ score_t (*scorer)(NgramCounts&, const size_t, const size_t, size_t, vector);
if (scorer_str == "bleu") {
scorer = &bleu;
} else if (scorer_str == "stupid_bleu") {
@@ -122,58 +118,64 @@ main(int argc, char** argv)
cerr << "Don't know scoring metric: '" << scorer_str << "', exiting." << endl;
exit(1);
}
- // for approx_bleu
NgramCounts global_counts(N); // counts for 1 best translations
- size_t global_hyp_len = 0; // sum hypothesis lengths
- size_t global_ref_len = 0; // sum reference lengths
- // this is all BLEU implmentations
- vector bleu_weights; // we leave this empty -> 1/N; TODO?
+ size_t global_hyp_len = 0; // sum hypothesis lengths
+ size_t global_ref_len = 0; // sum reference lengths
+ // ^^^ global_* for approx_bleu
+ vector bleu_weights; // we leave this empty -> 1/N
if (!quiet) cout << setw(26) << "scorer '" << scorer_str << "'" << endl << endl;
// init weights
Weights weights;
- if (cfg.count("weights")) weights.InitFromFile(cfg["weights"].as());
+ if (cfg.count("input_weights")) weights.InitFromFile(cfg["input_weights"].as());
SparseVector lambdas;
weights.InitSparseVector(&lambdas);
vector dense_weights;
+ // meta params for perceptron, SVM
+ double eta = cfg["learning_rate"].as();
+ double gamma = cfg["gamma"].as();
+ lambdas.add_value(FD::Convert("__bias"), 0);
+
// input
- if (!quiet && !verbose)
- cout << "(a dot represents " << DTRAIN_DOTS << " lines of input)" << endl;
string input_fn = cfg["input"].as();
- ifstream input;
- if (input_fn != "-") input.open(input_fn.c_str());
- string in;
- vector in_split; // input: src\tref\tpsg
- vector ref_tok; // tokenized reference
- vector ref_ids; // reference as vector of WordID
-
- // buffer input for t > 0
- vector src_str_buf; // source strings, TODO? memory
- vector > ref_ids_buf; // references as WordID vecs
+ ReadFile input(input_fn);
+ // buffer input for t > 0
+ vector src_str_buf; // source strings
+ vector > ref_ids_buf; // references as WordID vecs
// this is for writing the grammar buffer file
char grammar_buf_fn[] = DTRAIN_TMP_DIR"/dtrain-grammars-XXXXXX";
mkstemp(grammar_buf_fn);
ogzstream grammar_buf_out;
grammar_buf_out.open(grammar_buf_fn);
- size_t sid = 0, in_sz = 99999999; // sentence id, input size
- double acc_1best_score = 0., acc_1best_model = 0.;
- vector > scores_per_iter;
- double max_score = 0.;
- size_t best_t = 0;
- bool next = false, stop = false;
- double score = 0.;
- size_t cand_len = 0;
- double overall_time = 0.;
-
- // for the perceptron/SVM; TODO as params
- double eta = 0.0005;
- double gamma = 0.;//01; // -> SVM
- lambdas.add_value(FD::Convert("__bias"), 0);
-
- // for random sampling
- srand (time(NULL));
+ size_t in_sz = 999999999; // input index, input size
+ vector > all_scores;
+ score_t max_score = 0.;
+ size_t best_it = 0;
+ float overall_time = 0.;
+
+ // output cfg
+ if (!quiet) {
+ cout << _p5;
+ cout << endl << "dtrain" << endl << "Parameters:" << endl;
+ cout << setw(25) << "k " << k << endl;
+ cout << setw(25) << "N " << N << endl;
+ cout << setw(25) << "T " << T << endl;
+ if (cfg.count("stop-after"))
+ cout << setw(25) << "stop_after " << stop_after << endl;
+ if (cfg.count("input_weights"))
+ cout << setw(25) << "weights in" << cfg["input_weights"].as() << endl;
+ cout << setw(25) << "input " << "'" << cfg["input"].as() << "'" << endl;
+ cout << setw(25) << "output " << "'" << cfg["output"].as() << "'" << endl;
+ if (sample_from == "kbest")
+ cout << setw(25) << "filter " << "'" << filter_type << "'" << endl;
+ cout << setw(25) << "learning rate " << eta << endl;
+ cout << setw(25) << "gamma " << gamma << endl;
+ cout << setw(25) << "sample from " << "'" << sample_from << "'" << endl;
+ cout << setw(25) << "pairs " << "'" << pair_sampling << "'" << endl;
+ if (!verbose) cout << "(a dot represents " << DTRAIN_DOTS << " lines of input)" << endl;
+ }
for (size_t t = 0; t < T; t++) // T epochs
@@ -181,58 +183,44 @@ main(int argc, char** argv)
time_t start, end;
time(&start);
-
- // actually, we need only need this if t > 0 FIXME
igzstream grammar_buf_in;
if (t > 0) grammar_buf_in.open(grammar_buf_fn);
-
- // reset average scores
- acc_1best_score = acc_1best_model = 0.;
-
- // reset sentence counter
- sid = 0;
-
+ score_t score_sum = 0., model_sum = 0.;
+ size_t ii = 0;
if (!quiet) cout << "Iteration #" << t+1 << " of " << T << "." << endl;
while(true)
{
- // get input from stdin or file
- in.clear();
- next = stop = false; // next iteration, premature stop
- if (t == 0) {
- if (input_fn == "-") {
- if (!getline(cin, in)) next = true;
- } else {
- if (!getline(input, in)) next = true;
- }
+ string in;
+ bool next = false, stop = false; // next iteration or premature stop
+ if (t == 0) {
+ if(!getline(*input, in)) next = true;
} else {
- if (sid == in_sz) next = true; // stop if we reach the end of our input
+ if (ii == in_sz) next = true; // stop if we reach the end of our input
}
// stop after X sentences (but still iterate for those)
- if (stop_after > 0 && stop_after == sid && !next) stop = true;
+ if (stop_after > 0 && stop_after == ii && !next) stop = true;
// produce some pretty output
if (!quiet && !verbose) {
- if (sid == 0) cout << " ";
- if ((sid+1) % (DTRAIN_DOTS) == 0) {
- cout << ".";
- cout.flush();
- }
- if ((sid+1) % (20*DTRAIN_DOTS) == 0) {
- cout << " " << sid+1 << endl;
- if (!next && !stop) cout << " ";
- }
- if (stop) {
- if (sid % (20*DTRAIN_DOTS) != 0) cout << " " << sid << endl;
- cout << "Stopping after " << stop_after << " input sentences." << endl;
- } else {
- if (next) {
- if (sid % (20*DTRAIN_DOTS) != 0) {
- cout << " " << sid << endl;
- }
- }
+ if (ii == 0) cout << " ";
+ if ((ii+1) % (DTRAIN_DOTS) == 0) {
+ cout << ".";
+ cout.flush();
+ }
+ if ((ii+1) % (20*DTRAIN_DOTS) == 0) {
+ cout << " " << ii+1 << endl;
+ if (!next && !stop) cout << " ";
+ }
+ if (stop) {
+ if (ii % (20*DTRAIN_DOTS) != 0) cout << " " << ii << endl;
+ cout << "Stopping after " << stop_after << " input sentences." << endl;
+ } else {
+ if (next) {
+ if (ii % (20*DTRAIN_DOTS) != 0) cout << " " << ii << endl;
}
+ }
}
// next iteration
@@ -244,12 +232,15 @@ main(int argc, char** argv)
weights.InitVector(&dense_weights);
decoder.SetWeights(dense_weights);
+ // getting input
+ vector in_split; // input: sid\tsrc\tref\tpsg
+ vector ref_ids; // reference as vector
if (t == 0) {
// handling input
- in_split.clear();
strsplit(in, in_split, '\t', 4);
// getting reference
- ref_tok.clear(); ref_ids.clear();
+ ref_ids.clear();
+ vector ref_tok;
strsplit(in_split[2], ref_tok, ' ');
register_and_convert(ref_tok, ref_ids);
ref_ids_buf.push_back(ref_ids);
@@ -268,7 +259,7 @@ main(int argc, char** argv)
decoder.SetSentenceGrammarFromString(in_split[3]);
// decode
src_str_buf.push_back(in_split[1]);
- decoder.Decode(in_split[1], &observer);
+ decoder.Decode(in_split[1], observer);
} else {
// get buffered grammar
string grammar_str;
@@ -280,73 +271,67 @@ main(int argc, char** argv)
}
decoder.SetSentenceGrammarFromString(grammar_str);
// decode
- decoder.Decode(src_str_buf[sid], &observer);
+ decoder.Decode(src_str_buf[ii], observer);
}
- // get kbest list
- KBestList* kb;
- //if () { // TODO get from forest
- kb = observer.GetKBest();
- //}
+ Samples* samples = observer->GetSamples();
// (local) scoring
- if (t > 0) ref_ids = ref_ids_buf[sid];
- for (size_t i = 0; i < kb->GetSize(); i++) {
- NgramCounts counts = make_ngram_counts(ref_ids, kb->sents[i], N);
+ if (t > 0) ref_ids = ref_ids_buf[ii];
+ score_t score = 0.;
+ for (size_t i = 0; i < samples->GetSize(); i++) {
+ NgramCounts counts = make_ngram_counts(ref_ids, samples->sents[i], N);
if (scorer_str == "approx_bleu") {
+ size_t hyp_len = 0;
if (i == 0) { // 'context of 1best translations'
global_counts += counts;
- global_hyp_len += kb->sents[i].size();
+ global_hyp_len += samples->sents[i].size();
global_ref_len += ref_ids.size();
counts.reset();
- cand_len = 0;
} else {
- cand_len = kb->sents[i].size();
+ hyp_len = samples->sents[i].size();
}
NgramCounts counts_tmp = global_counts + counts;
- score = .9*scorer(counts_tmp,
- global_ref_len,
- global_hyp_len + cand_len, N, bleu_weights);
+ score = .9 * scorer(counts_tmp,
+ global_ref_len,
+ global_hyp_len + hyp_len, N, bleu_weights);
} else {
- cand_len = kb->sents[i].size();
score = scorer(counts,
- ref_ids.size(),
- kb->sents[i].size(), N, bleu_weights);
+ ref_ids.size(),
+ samples->sents[i].size(), N, bleu_weights);
}
- kb->scores.push_back(score);
+ samples->scores.push_back(score);
if (i == 0) {
- acc_1best_score += score;
- acc_1best_model += kb->model_scores[i];
+ score_sum += score;
+ model_sum += samples->model_scores[i];
}
if (verbose) {
if (i == 0) cout << "'" << TD::GetString(ref_ids) << "' [ref]" << endl;
- cout << _p5 << _np << "[hyp " << i << "] " << "'" << TD::GetString(kb->sents[i]) << "'";
- cout << " [SCORE=" << score << ",model="<< kb->model_scores[i] << "]" << endl;
- //cout << kb->feats[i] << endl; // too verbose
+ cout << _p5 << _np << "[hyp " << i << "] " << "'" << TD::GetString(samples->sents[i]) << "'";
+ cout << " [SCORE=" << score << ",model="<< samples->model_scores[i] << "]" << endl;
+ cout << samples->feats[i] << endl;
}
- } // Nbest loop
+ } // sample/scoring loop
if (verbose) cout << endl;
//////////////////////////////////////////////////////////
// UPDATE WEIGHTS
if (!noup) {
-
- int up = 0;
-
- TrainingInstances pairs;
- sample_all_pairs(kb, pairs);
- //sample_rand_pairs(kb, pairs, &rng);
+ vector pairs;
+ if (pair_sampling == "all")
+ sample_all_pairs(samples, pairs);
+ if (pair_sampling == "rand")
+ sample_rand_pairs(samples, pairs, &rng);
- for (TrainingInstances::iterator ti = pairs.begin();
+ for (vector::iterator ti = pairs.begin();
ti != pairs.end(); ti++) {
SparseVector dv;
if (ti->first_score - ti->second_score < 0) {
- up++;
dv = ti->second - ti->first;
//} else {
//dv = ti->first - ti->second;
@@ -360,10 +345,10 @@ main(int argc, char** argv)
if (verbose) {
cout << "{{ f("<< ti->first_rank <<") > f(" << ti->second_rank << ") but g(i)="<< ti->first_score <<" < g(j)="<< ti->second_score << " so update" << endl;
- cout << " i " << TD::GetString(kb->sents[ti->first_rank]) << endl;
- cout << " " << kb->feats[ti->first_rank] << endl;
- cout << " j " << TD::GetString(kb->sents[ti->second_rank]) << endl;
- cout << " " << kb->feats[ti->second_rank] << endl;
+ cout << " i " << TD::GetString(samples->sents[ti->first_rank]) << endl;
+ cout << " " << samples->feats[ti->first_rank] << endl;
+ cout << " j " << TD::GetString(samples->sents[ti->second_rank]) << endl;
+ cout << " " << samples->feats[ti->second_rank] << endl;
cout << " diff vec: " << dv << endl;
cout << " lambdas after update: " << lambdas << endl;
cout << "}}" << endl;
@@ -378,69 +363,66 @@ main(int argc, char** argv)
//double l2 = lambdas.l2norm();
//if (l2) lambdas /= lambdas.l2norm();
- //cout << up << endl;
}
//////////////////////////////////////////////////////////
- ++sid;
+ ++ii;
- if (hstreaming) cerr << "reporter:counter:dtrain,sid," << sid << endl;
+ if (hstreaming) cerr << "reporter:counter:dtrain,sid," << in_split[0] << endl;
} // input loop
if (t == 0) {
- in_sz = sid; // remember size (lines) of input
+ in_sz = ii; // remember size of input (# lines)
grammar_buf_out.close();
- if (input_fn != "-") input.close();
} else {
grammar_buf_in.close();
}
// print some stats
- double avg_1best_score = acc_1best_score/(double)in_sz;
- double avg_1best_model = acc_1best_model/(double)in_sz;
- double avg_1best_score_diff, avg_1best_model_diff;
+ score_t score_avg = score_sum/(score_t)in_sz;
+ score_t model_avg = model_sum/(score_t)in_sz;
+ score_t score_diff, model_diff;
if (t > 0) {
- avg_1best_score_diff = avg_1best_score - scores_per_iter[t-1][0];
- avg_1best_model_diff = avg_1best_model - scores_per_iter[t-1][1];
+ score_diff = score_avg - all_scores[t-1].first;
+ model_diff = model_avg - all_scores[t-1].second;
} else {
- avg_1best_score_diff = avg_1best_score;
- avg_1best_model_diff = avg_1best_model;
+ score_diff = score_avg;
+ model_diff = model_avg;
}
if (!quiet) {
cout << _p5 << _p << "WEIGHTS" << endl;
- for (vector::iterator it = wprint.begin(); it != wprint.end(); it++) {
- cout << setw(16) << *it << " = " << dense_weights[FD::Convert(*it)] << endl;
+ for (vector::iterator it = print_weights.begin(); it != print_weights.end(); it++) {
+ cout << setw(16) << *it << " = " << lambdas.get(FD::Convert(*it)) << endl;
}
cout << " ---" << endl;
- cout << _np << " avg score: " << avg_1best_score;
- cout << _p << " (" << avg_1best_score_diff << ")" << endl;
- cout << _np << "avg model score: " << avg_1best_model;
- cout << _p << " (" << avg_1best_model_diff << ")" << endl;
+ cout << _np << " 1best avg score: " << score_avg;
+ cout << _p << " (" << score_diff << ")" << endl;
+ cout << _np << "1best avg model score: " << model_avg;
+ cout << _p << " (" << model_diff << ")" << endl;
}
- vector remember_scores;
- remember_scores.push_back(avg_1best_score);
- remember_scores.push_back(avg_1best_model);
- scores_per_iter.push_back(remember_scores);
- if (avg_1best_score > max_score) {
- max_score = avg_1best_score;
- best_t = t;
+ pair remember;
+ remember.first = score_avg;
+ remember.second = model_avg;
+ all_scores.push_back(remember);
+ if (score_avg > max_score) {
+ max_score = score_avg;
+ best_it = t;
}
time (&end);
- double time_dif = difftime(end, start);
- overall_time += time_dif;
+ float time_diff = difftime(end, start);
+ overall_time += time_diff;
if (!quiet) {
- cout << _p2 << _np << "(time " << time_dif/60. << " min, ";
- cout << time_dif/(double)in_sz<< " s/S)" << endl;
+ cout << _p2 << _np << "(time " << time_diff/60. << " min, ";
+ cout << time_diff/(float)in_sz<< " s/S)" << endl;
}
-
if (t+1 != T && !quiet) cout << endl;
if (noup) break;
} // outer loop
- //unlink(grammar_buf_fn);
+ unlink(grammar_buf_fn);
if (!noup) {
if (!quiet) cout << endl << "writing weights file '" << cfg["output"].as() << "' ...";
@@ -452,7 +434,7 @@ main(int argc, char** argv)
cout << _np << FD::Convert(ti->first) << "\t" << ti->second << endl;
}
if (hstreaming) cout << "__SHARD_COUNT__\t1" << endl;
- } else {
+ } else if (cfg["output"].as() != "VOID") {
weights.InitFromVector(lambdas);
weights.WriteToFile(cfg["output"].as(), true);
}
@@ -461,7 +443,7 @@ main(int argc, char** argv)
if (!quiet) {
cout << _p5 << _np << endl << "---" << endl << "Best iteration: ";
- cout << best_t+1 << " [SCORE '" << scorer_str << "'=" << max_score << "]." << endl;
+ cout << best_it+1 << " [SCORE '" << scorer_str << "'=" << max_score << "]." << endl;
cout << _p2 << "This took " << overall_time/60. << " min." << endl;
}
diff --git a/dtrain/kbestget.h b/dtrain/kbestget.h
index cf466fe4..79201182 100644
--- a/dtrain/kbestget.h
+++ b/dtrain/kbestget.h
@@ -1,19 +1,14 @@
#ifndef _DTRAIN_KBESTGET_H_
#define _DTRAIN_KBESTGET_H_
-
#include "kbest.h"
-
namespace dtrain
{
-/*
- * KBestList
- *
- */
-struct KBestList {
+struct Samples
+{
vector > feats;
vector > sents;
vector model_scores;
@@ -21,71 +16,71 @@ struct KBestList {
size_t GetSize() { return sents.size(); }
};
+struct HypoSampler : public DecoderObserver
+{
+ virtual Samples* GetSamples() {}
+};
-/*
- * KBestGetter
- *
- */
-struct KBestGetter : public DecoderObserver
+struct KBestGetter : public HypoSampler
{
const size_t k_;
const string filter_type;
- KBestList kb;
+ Samples s;
- KBestGetter( const size_t k, const string filter_type ) :
+ KBestGetter(const size_t k, const string filter_type) :
k_(k), filter_type(filter_type) {}
virtual void
- NotifyTranslationForest( const SentenceMetadata& smeta, Hypergraph* hg )
+ NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg)
{
- KBest( *hg );
+ KBest(*hg);
}
- KBestList* GetKBest() { return &kb; }
+ Samples* GetSamples() { return &s; }
void
- KBest( const Hypergraph& forest )
+ KBest(const Hypergraph& forest)
{
- if ( filter_type == "unique" ) {
- KBestUnique( forest );
- } else if ( filter_type == "no" ) {
- KBestNoFilter( forest );
+ if (filter_type == "unique") {
+ KBestUnique(forest);
+ } else if (filter_type == "no") {
+ KBestNoFilter(forest);
}
}
void
- KBestUnique( const Hypergraph& forest )
+ KBestUnique(const Hypergraph& forest)
{
- kb.sents.clear();
- kb.feats.clear();
- kb.model_scores.clear();
- kb.scores.clear();
- KBest::KBestDerivations, ESentenceTraversal, KBest::FilterUnique, prob_t, EdgeProb> kbest( forest, k_ );
- for ( size_t i = 0; i < k_; ++i ) {
+ s.sents.clear();
+ s.feats.clear();
+ s.model_scores.clear();
+ s.scores.clear();
+ KBest::KBestDerivations, ESentenceTraversal, KBest::FilterUnique, prob_t, EdgeProb> kbest(forest, k_);
+ for (size_t i = 0; i < k_; ++i) {
const KBest::KBestDerivations, ESentenceTraversal, KBest::FilterUnique, prob_t, EdgeProb>::Derivation* d =
- kbest.LazyKthBest( forest.nodes_.size() - 1, i );
+ kbest.LazyKthBest(forest.nodes_.size() - 1, i);
if (!d) break;
- kb.sents.push_back( d->yield);
- kb.feats.push_back( d->feature_values );
- kb.model_scores.push_back( log(d->score) );
+ s.sents.push_back(d->yield);
+ s.feats.push_back(d->feature_values);
+ s.model_scores.push_back(log(d->score));
}
}
void
- KBestNoFilter( const Hypergraph& forest )
+ KBestNoFilter(const Hypergraph& forest)
{
- kb.sents.clear();
- kb.feats.clear();
- kb.model_scores.clear();
- kb.scores.clear();
- KBest::KBestDerivations, ESentenceTraversal> kbest( forest, k_ );
- for ( size_t i = 0; i < k_; ++i ) {
+ s.sents.clear();
+ s.feats.clear();
+ s.model_scores.clear();
+ s.scores.clear();
+ KBest::KBestDerivations, ESentenceTraversal> kbest(forest, k_);
+ for (size_t i = 0; i < k_; ++i) {
const KBest::KBestDerivations, ESentenceTraversal>::Derivation* d =
- kbest.LazyKthBest( forest.nodes_.size() - 1, i );
+ kbest.LazyKthBest(forest.nodes_.size() - 1, i);
if (!d) break;
- kb.sents.push_back( d->yield);
- kb.feats.push_back( d->feature_values );
- kb.model_scores.push_back( log(d->score) );
+ s.sents.push_back(d->yield);
+ s.feats.push_back(d->feature_values);
+ s.model_scores.push_back(log(d->score));
}
}
};
@@ -93,6 +88,5 @@ struct KBestGetter : public DecoderObserver
} // namespace
-
#endif
diff --git a/dtrain/ksampler.h b/dtrain/ksampler.h
index 914e9723..ac88b643 100644
--- a/dtrain/ksampler.h
+++ b/dtrain/ksampler.h
@@ -1,21 +1,22 @@
#ifndef _DTRAIN_KSAMPLER_H_
#define _DTRAIN_KSAMPLER_H_
-#include "kbest.h"
#include "hgsampler.h"
+#include "kbest.h" // cdec
#include "sampler.h"
namespace dtrain
{
+
/*
* KSampler
*
*/
-struct KSampler : public DecoderObserver
+struct KSampler : public HypoSampler
{
const size_t k_;
- KBestList kb;
+ Samples s;
MT19937* rng;
explicit KSampler( const size_t k, MT19937* prng ) :
@@ -27,19 +28,19 @@ struct KSampler : public DecoderObserver
Sample( *hg );
}
- KBestList* GetKBest() { return &kb; }
+ Samples* GetSamples() { return &s; }
void Sample( const Hypergraph& forest ) {
- kb.sents.clear();
- kb.feats.clear();
- kb.model_scores.clear();
- kb.scores.clear();
+ s.sents.clear();
+ s.feats.clear();
+ s.model_scores.clear();
+ s.scores.clear();
std::vector samples;
HypergraphSampler::sample_hypotheses(forest, k_, rng, &samples);
for ( size_t i = 0; i < k_; ++i ) {
- kb.sents.push_back( samples[i].words );
- kb.feats.push_back( samples[i].fmap );
- kb.model_scores.push_back( log(samples[i].model_score) );
+ s.sents.push_back( samples[i].words );
+ s.feats.push_back( samples[i].fmap );
+ s.model_scores.push_back( log(samples[i].model_score) );
}
}
};
@@ -47,6 +48,5 @@ struct KSampler : public DecoderObserver
} // namespace
-
#endif
diff --git a/dtrain/pairsampling.h b/dtrain/pairsampling.h
index e06036ca..a8521485 100644
--- a/dtrain/pairsampling.h
+++ b/dtrain/pairsampling.h
@@ -8,21 +8,19 @@ namespace dtrain
{
-struct TPair
+struct Pair
{
SparseVector first, second;
size_t first_rank, second_rank;
double first_score, second_score;
};
-typedef vector TrainingInstances;
-
inline void
-sample_all_pairs(KBestList* kb, TrainingInstances &training)
+sample_all_pairs(Samples* kb, vector &training)
{
for (size_t i = 0; i < kb->GetSize()-1; i++) {
for (size_t j = i+1; j < kb->GetSize(); j++) {
- TPair p;
+ Pair p;
p.first = kb->feats[i];
p.second = kb->feats[j];
p.first_rank = i;
@@ -35,12 +33,12 @@ sample_all_pairs(KBestList* kb, TrainingInstances &training)
}
inline void
-sample_rand_pairs(KBestList* kb, TrainingInstances &training, MT19937* prng)
+sample_rand_pairs(Samples* kb, vector &training, MT19937* prng)
{
for (size_t i = 0; i < kb->GetSize()-1; i++) {
for (size_t j = i+1; j < kb->GetSize(); j++) {
if (prng->next() < .5) {
- TPair p;
+ Pair p;
p.first = kb->feats[i];
p.second = kb->feats[j];
p.first_rank = i;
diff --git a/dtrain/score.cc b/dtrain/score.cc
index d08e87f3..c6d3a05f 100644
--- a/dtrain/score.cc
+++ b/dtrain/score.cc
@@ -47,27 +47,27 @@ make_ngram_counts(vector hyp, vector ref, size_t N)
*
* NOTE: 0 if one n in {1..N} has 0 count
*/
-double
+score_t
brevity_penaly(const size_t hyp_len, const size_t ref_len)
{
if (hyp_len > ref_len) return 1;
- return exp(1 - (double)ref_len/(double)hyp_len);
+ return exp(1 - (score_t)ref_len/(score_t)hyp_len);
}
-double
+score_t
bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len,
- size_t N, vector weights )
+ size_t N, vector weights )
{
if (hyp_len == 0 || ref_len == 0) return 0;
if (ref_len < N) N = ref_len;
- float N_ = (float)N;
+ score_t N_ = (score_t)N;
if (weights.empty())
{
for (size_t i = 0; i < N; i++) weights.push_back(1/N_);
}
- double sum = 0;
+ score_t sum = 0;
for (size_t i = 0; i < N; i++) {
if (counts.clipped[i] == 0 || counts.sum[i] == 0) return 0;
- sum += weights[i] * log((double)counts.clipped[i] / (double)counts.sum[i]);
+ sum += weights[i] * log((score_t)counts.clipped[i] / (score_t)counts.sum[i]);
}
return brevity_penaly(hyp_len, ref_len) * exp(sum);
}
@@ -82,22 +82,22 @@ bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len,
*
* NOTE: 0 iff no 1gram match
*/
-double
+score_t
stupid_bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len,
- size_t N, vector weights )
+ size_t N, vector weights )
{
if (hyp_len == 0 || ref_len == 0) return 0;
if (ref_len < N) N = ref_len;
- float N_ = (float)N;
+ score_t N_ = (score_t)N;
if (weights.empty())
{
for (size_t i = 0; i < N; i++) weights.push_back(1/N_);
}
- double sum = 0;
- float add = 0;
+ score_t sum = 0;
+ score_t add = 0;
for (size_t i = 0; i < N; i++) {
if (i == 1) add = 1;
- sum += weights[i] * log(((double)counts.clipped[i] + add) / ((double)counts.sum[i] + add));
+ sum += weights[i] * log(((score_t)counts.clipped[i] + add) / ((score_t)counts.sum[i] + add));
}
return brevity_penaly(hyp_len, ref_len) * exp(sum);
}
@@ -111,21 +111,21 @@ stupid_bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len,
*
* NOTE: max is 0.9375
*/
-double
+score_t
smooth_bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len,
-            const size_t N, vector<float> weights )
+            const size_t N, vector<score_t> weights )
{
if (hyp_len == 0 || ref_len == 0) return 0;
- float N_ = (float)N;
+ score_t N_ = (score_t)N;
if (weights.empty())
{
for (size_t i = 0; i < N; i++) weights.push_back(1/N_);
}
- double sum = 0;
- float j = 1;
+ score_t sum = 0;
+ score_t j = 1;
for (size_t i = 0; i < N; i++) {
if (counts.clipped[i] == 0 || counts.sum[i] == 0) continue;
- sum += exp((weights[i] * log((double)counts.clipped[i]/(double)counts.sum[i]))) / pow(2, N_-j+1);
+ sum += exp((weights[i] * log((score_t)counts.clipped[i]/(score_t)counts.sum[i]))) / pow(2, N_-j+1);
j++;
}
return brevity_penaly(hyp_len, ref_len) * sum;
@@ -138,9 +138,9 @@ smooth_bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len,
* and Structural Translation Features"
* (Chiang et al. '08)
*/
-double
+score_t
approx_bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len,
-            const size_t N, vector<float> weights)
+            const size_t N, vector<score_t> weights)
{
return brevity_penaly(hyp_len, ref_len)
* 0.9 * bleu(counts, hyp_len, ref_len, N, weights);
diff --git a/dtrain/score.h b/dtrain/score.h
index 0afb6237..bff0b10c 100644
--- a/dtrain/score.h
+++ b/dtrain/score.h
@@ -15,15 +15,16 @@ namespace dtrain
{
+typedef double score_t; // float
+
struct NgramCounts
{
- NgramCounts(const size_t N) : N_(N) {
- reset();
- }
size_t N_;
 map<size_t, size_t> clipped;
 map<size_t, size_t> sum;
+ NgramCounts(const size_t N) : N_(N) { reset(); }
+
void
operator+=(const NgramCounts& rhs)
{
@@ -76,22 +77,22 @@ struct NgramCounts
};
 typedef map<vector<WordID>, size_t> Ngrams;
+
 Ngrams make_ngrams(vector<WordID>& s, size_t N);
 NgramCounts make_ngram_counts(vector<WordID> hyp, vector<WordID> ref, size_t N);
-double brevity_penaly(const size_t hyp_len, const size_t ref_len);
-double bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len, const size_t N,
-            vector<float> weights = vector<float>());
-double stupid_bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len, size_t N,
-                   vector<float> weights = vector<float>());
-double smooth_bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len, const size_t N,
-                   vector<float> weights = vector<float>());
-double approx_bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len, const size_t N,
-                   vector<float> weights = vector<float>());
+score_t brevity_penaly(const size_t hyp_len, const size_t ref_len);
+score_t bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len, const size_t N,
+             vector<score_t> weights = vector<score_t>());
+score_t stupid_bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len, size_t N,
+                    vector<score_t> weights = vector<score_t>());
+score_t smooth_bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len, const size_t N,
+                    vector<score_t> weights = vector<score_t>());
+score_t approx_bleu(NgramCounts& counts, const size_t hyp_len, const size_t ref_len, const size_t N,
+                    vector<score_t> weights = vector<score_t>());
} // namespace
-
#endif
diff --git a/dtrain/test/cdec_toy/cdec.ini b/dtrain/test/cdec_toy/cdec.ini
index 3a6bab68..9eb34512 100644
--- a/dtrain/test/cdec_toy/cdec.ini
+++ b/dtrain/test/cdec_toy/cdec.ini
@@ -1,3 +1,4 @@
formalism=scfg
grammar=../dtrain/test/toy_cdec/grammar
add_pass_through_rules=true
+weights=../dtrain/test/toy_cdec/weights
diff --git a/dtrain/test/example/cdec.ini b/dtrain/test/example/cdec.ini
new file mode 100644
index 00000000..cdc8a8bb
--- /dev/null
+++ b/dtrain/test/example/cdec.ini
@@ -0,0 +1,7 @@
+formalism=scfg
+add_pass_through_rules=true
+cubepruning_pop_limit=30
+scfg_max_span_limit=15
+feature_function=WordPenalty
+feature_function=KLanguageModel /home/pks/z/X/x/dtrain/test/example/nc-wmt11.en.srilm.gz
+#feature_function=RuleIdentityFeatures
diff --git a/dtrain/test/example/dtrain.ini b/dtrain/test/example/dtrain.ini
new file mode 100644
index 00000000..aee3c89e
--- /dev/null
+++ b/dtrain/test/example/dtrain.ini
@@ -0,0 +1,11 @@
+decoder_config=test/example/cdec.ini
+ksamples=100
+ngrams=3
+epochs=1000
+input=test/example/nc-1k.gz
+scorer=stupid_bleu
+output=test/example/weights.gz
+stop_after=10
+sample_from=kbest
+pair_sampling=all
+print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PassThrough
diff --git a/dtrain/test/example/nc-1k-tabs.gz b/dtrain/test/example/nc-1k-tabs.gz
new file mode 100644
index 00000000..45496cd8
Binary files /dev/null and b/dtrain/test/example/nc-1k-tabs.gz differ
diff --git a/dtrain/test/example/nc-1k.gz b/dtrain/test/example/nc-1k.gz
new file mode 100644
index 00000000..f638a166
Binary files /dev/null and b/dtrain/test/example/nc-1k.gz differ
diff --git a/dtrain/test/example/nc-wmt11.en.srilm.gz b/dtrain/test/example/nc-wmt11.en.srilm.gz
new file mode 100644
index 00000000..7ce81057
Binary files /dev/null and b/dtrain/test/example/nc-wmt11.en.srilm.gz differ
diff --git a/dtrain/test/example/weights.gz b/dtrain/test/example/weights.gz
new file mode 100644
index 00000000..21157427
Binary files /dev/null and b/dtrain/test/example/weights.gz differ
diff --git a/dtrain/test/log_reg_dyer/bin_class.cc b/dtrain/test/log_reg_dyer/bin_class.cc
new file mode 100644
index 00000000..19bcde25
--- /dev/null
+++ b/dtrain/test/log_reg_dyer/bin_class.cc
@@ -0,0 +1,4 @@
+#include "bin_class.h"
+
+Objective::~Objective() {}
+
diff --git a/dtrain/test/log_reg_dyer/bin_class.h b/dtrain/test/log_reg_dyer/bin_class.h
new file mode 100644
index 00000000..3466109a
--- /dev/null
+++ b/dtrain/test/log_reg_dyer/bin_class.h
@@ -0,0 +1,22 @@
+#ifndef _BIN_CLASS_H_
+#define _BIN_CLASS_H_
+
+#include <vector>
+#include "sparse_vector.h"
+
+struct TrainingInstance {
+ // TODO add other info? loss for MIRA-type updates?
+  SparseVector<double> x_feature_map;
+ bool y;
+};
+
+struct Objective {
+ virtual ~Objective();
+
+ // returns f(x) and f'(x)
+  virtual double ObjectiveAndGradient(const SparseVector<double>& x,
+                                      const std::vector<TrainingInstance>& training_instances,
+                                      SparseVector<double>* g) const = 0;
+};
+
+#endif
diff --git a/dtrain/test/log_reg_dyer/log_reg.cc b/dtrain/test/log_reg_dyer/log_reg.cc
new file mode 100644
index 00000000..ec2331fe
--- /dev/null
+++ b/dtrain/test/log_reg_dyer/log_reg.cc
@@ -0,0 +1,39 @@
+#include "log_reg.h"
+
+#include <vector>
+#include <cmath>
+
+#include "sparse_vector.h"
+
+using namespace std;
+
+double LogisticRegression::ObjectiveAndGradient(const SparseVector<double>& x,
+                                                const vector<TrainingInstance>& training_instances,
+                                                SparseVector<double>* g) const {
+ double cll = 0;
+ for (int i = 0; i < training_instances.size(); ++i) {
+ const double dotprod = training_instances[i].x_feature_map.dot(x); // TODO no bias, if bias, add x[0]
+ double lp_false = dotprod;
+ double lp_true = -dotprod;
+ if (0 < lp_true) {
+ lp_true += log1p(exp(-lp_true));
+ lp_false = log1p(exp(lp_false));
+ } else {
+ lp_true = log1p(exp(lp_true));
+ lp_false += log1p(exp(-lp_false));
+ }
+ lp_true *= -1;
+ lp_false *= -1;
+ if (training_instances[i].y) { // true label
+ cll -= lp_true;
+ (*g) -= training_instances[i].x_feature_map * exp(lp_false);
+ // (*g)[0] -= exp(lp_false); // bias
+ } else { // false label
+ cll -= lp_false;
+ (*g) += training_instances[i].x_feature_map * exp(lp_true);
+ // g += corpus[i].second * exp(lp_true);
+ }
+ }
+ return cll;
+}
+
diff --git a/dtrain/test/log_reg_dyer/log_reg.h b/dtrain/test/log_reg_dyer/log_reg.h
new file mode 100644
index 00000000..ecc560b8
--- /dev/null
+++ b/dtrain/test/log_reg_dyer/log_reg.h
@@ -0,0 +1,14 @@
+#ifndef _LOG_REG_H_
+#define _LOG_REG_H_
+
+#include <vector>
+#include "sparse_vector.h"
+#include "bin_class.h"
+
+struct LogisticRegression : public Objective {
+  double ObjectiveAndGradient(const SparseVector<double>& x,
+                              const std::vector<TrainingInstance>& training_instances,
+                              SparseVector<double>* g) const;
+};
+
+#endif
diff --git a/dtrain/test/logreg/bin_class.cc b/dtrain/test/logreg/bin_class.cc
deleted file mode 100644
index 19bcde25..00000000
--- a/dtrain/test/logreg/bin_class.cc
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "bin_class.h"
-
-Objective::~Objective() {}
-
diff --git a/dtrain/test/logreg/bin_class.h b/dtrain/test/logreg/bin_class.h
deleted file mode 100644
index 3466109a..00000000
--- a/dtrain/test/logreg/bin_class.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _BIN_CLASS_H_
-#define _BIN_CLASS_H_
-
-#include <vector>
-#include "sparse_vector.h"
-
-struct TrainingInstance {
- // TODO add other info? loss for MIRA-type updates?
-  SparseVector<double> x_feature_map;
- bool y;
-};
-
-struct Objective {
- virtual ~Objective();
-
- // returns f(x) and f'(x)
-  virtual double ObjectiveAndGradient(const SparseVector<double>& x,
-                                      const std::vector<TrainingInstance>& training_instances,
-                                      SparseVector<double>* g) const = 0;
-};
-
-#endif
diff --git a/dtrain/test/logreg/log_reg.cc b/dtrain/test/logreg/log_reg.cc
deleted file mode 100644
index ec2331fe..00000000
--- a/dtrain/test/logreg/log_reg.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-#include "log_reg.h"
-
-#include <vector>
-#include <cmath>
-
-#include "sparse_vector.h"
-
-using namespace std;
-
-double LogisticRegression::ObjectiveAndGradient(const SparseVector<double>& x,
-                                                const vector<TrainingInstance>& training_instances,
-                                                SparseVector<double>* g) const {
- double cll = 0;
- for (int i = 0; i < training_instances.size(); ++i) {
- const double dotprod = training_instances[i].x_feature_map.dot(x); // TODO no bias, if bias, add x[0]
- double lp_false = dotprod;
- double lp_true = -dotprod;
- if (0 < lp_true) {
- lp_true += log1p(exp(-lp_true));
- lp_false = log1p(exp(lp_false));
- } else {
- lp_true = log1p(exp(lp_true));
- lp_false += log1p(exp(-lp_false));
- }
- lp_true *= -1;
- lp_false *= -1;
- if (training_instances[i].y) { // true label
- cll -= lp_true;
- (*g) -= training_instances[i].x_feature_map * exp(lp_false);
- // (*g)[0] -= exp(lp_false); // bias
- } else { // false label
- cll -= lp_false;
- (*g) += training_instances[i].x_feature_map * exp(lp_true);
- // g += corpus[i].second * exp(lp_true);
- }
- }
- return cll;
-}
-
diff --git a/dtrain/test/logreg/log_reg.h b/dtrain/test/logreg/log_reg.h
deleted file mode 100644
index ecc560b8..00000000
--- a/dtrain/test/logreg/log_reg.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _LOG_REG_H_
-#define _LOG_REG_H_
-
-#include <vector>
-#include "sparse_vector.h"
-#include "bin_class.h"
-
-struct LogisticRegression : public Objective {
-  double ObjectiveAndGradient(const SparseVector<double>& x,
-                              const std::vector<TrainingInstance>& training_instances,
-                              SparseVector<double>* g) const;
-};
-
-#endif
diff --git a/dtrain/test/toy_example/dtrain.ini b/dtrain/test/toy_example/dtrain.ini
index 0cc222e1..3ab4f8d4 100644
--- a/dtrain/test/toy_example/dtrain.ini
+++ b/dtrain/test/toy_example/dtrain.ini
@@ -1,5 +1,5 @@
decoder_config=test/toy_example/cdec.ini
-kbest=4
+ksamples=4
ngrams=3
epochs=2
input=test/toy_example/toy.in
--
cgit v1.2.3