author    Kenneth Heafield <github@kheafield.com>  2012-10-22 12:07:20 +0100
committer Kenneth Heafield <github@kheafield.com>  2012-10-22 12:07:20 +0100
commit    5f98fe5c4f2a2090eeb9d30c030305a70a8347d1 (patch)
tree      9b6002f850e6dea1e3400c6b19bb31a9cdf3067f /training
parent    cf9994131993b40be62e90e213b1e11e6b550143 (diff)
parent    21825a09d97c2e0afd20512f306fb25fed55e529 (diff)
Merge remote branch 'upstream/master'
Conflicts:
	Jamroot
	bjam
	decoder/Jamfile
	decoder/cdec.cc
	dpmert/Jamfile
	jam-files/sanity.jam
	klm/lm/Jamfile
	klm/util/Jamfile
	mira/Jamfile
Diffstat (limited to 'training')
 -rw-r--r--  training/Jamfile                                           | 25
 -rw-r--r--  training/Makefile.am                                       |  6
 -rw-r--r--  training/cllh_observer.cc                                  |  2
 -rw-r--r--  training/collapse_weights.cc                               |  2
 -rw-r--r--  training/fast_align.cc (renamed from training/model1.cc)   | 79
 -rw-r--r--  training/liblbfgs/Jamfile                                  |  5
 -rw-r--r--  training/mpi_batch_optimize.cc                             |  2
 -rw-r--r--  training/mpi_online_optimize.cc                            |  4
 -rw-r--r--  training/mr_optimize_reduce.cc                             |  4
 9 files changed, 58 insertions(+), 71 deletions(-)
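This merge brings in the rename of the model1 word aligner to fast_align together with a reworked command line: the corpus moves from a trailing positional argument to -i, -r adds reverse estimation, alignments become the default output, and -p switches to writing model parameters instead. As a plausible invocation only, with flag spellings read off the options table in the diff below rather than from any documentation, a forward-alignment run would look like `fast_align -i corpus.fr-en -d -v > corpus.fwd.al`.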
diff --git a/training/Jamfile b/training/Jamfile
deleted file mode 100644
index 073451fa..00000000
--- a/training/Jamfile
+++ /dev/null
@@ -1,25 +0,0 @@
-import testing ;
-import option ;
-
-lib training :
- ..//utils
- ..//mteval
- ..//decoder
- ../klm/lm//kenlm
- ..//boost_program_options
- ttables.cc
- : <include>.
- : :
- <library>..//decoder
- <library>../klm/lm//kenlm
- <library>..//utils
- <library>..//mteval
- <library>..//boost_program_options
- ;
-
-exe model1 : model1.cc : <include>../decoder ;
-
-# // all_tests [ glob *_test.cc ] : ..//decoder : <testing.arg>$(TOP)/decoder/test_data ;
-
-alias programs : model1 ;
-
diff --git a/training/Makefile.am b/training/Makefile.am
index 4cef0d5b..5254333a 100644
--- a/training/Makefile.am
+++ b/training/Makefile.am
@@ -1,5 +1,5 @@
bin_PROGRAMS = \
- model1 \
+ fast_align \
lbl_model \
test_ngram \
mr_em_map_adapter \
@@ -55,8 +55,8 @@ augment_grammar_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/lib
test_ngram_SOURCES = test_ngram.cc
test_ngram_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
-model1_SOURCES = model1.cc ttables.cc
-model1_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
+fast_align_SOURCES = fast_align.cc ttables.cc
+fast_align_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
lbl_model_SOURCES = lbl_model.cc
lbl_model_LDADD = libtraining.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
diff --git a/training/cllh_observer.cc b/training/cllh_observer.cc
index 58232769..4ec2fa65 100644
--- a/training/cllh_observer.cc
+++ b/training/cllh_observer.cc
@@ -45,7 +45,7 @@ void ConditionalLikelihoodObserver::NotifyAlignmentForest(const SentenceMetadata
cerr << "DIFF. ERR! log_model_z < log_ref_z: " << cur_obj << " " << log_ref_z << endl;
exit(1);
}
- assert(!isnan(log_ref_z));
+ assert(!std::isnan(log_ref_z));
acc_obj += (cur_obj - log_ref_z);
trg_words += smeta.GetReference().size();
}
diff --git a/training/collapse_weights.cc b/training/collapse_weights.cc
index dc480f6c..c03eb031 100644
--- a/training/collapse_weights.cc
+++ b/training/collapse_weights.cc
@@ -95,7 +95,7 @@ int main(int argc, char** argv) {
if (line.empty()) continue;
TRule tr(line, true);
const double lp = tr.GetFeatureValues().dot(w);
- if (isinf(lp)) { continue; }
+ if (std::isinf(lp)) { continue; }
tr.scores_.clear();
cout << tr.AsString() << " ||| F_and_E=" << lp - log(tot);
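The cllh_observer.cc and collapse_weights.cc hunks above, and the mpi_*_optimize.cc and mr_optimize_reduce.cc hunks below, all make the same portability fix: with C++11's <cmath>, isnan and isinf are overloaded functions in namespace std, and the unqualified C99 macro spellings are not guaranteed to be visible, so only the qualified calls compile on every standard library. A minimal self-contained sketch of the qualified usage (illustration only, not code from this commit):

    #include <cmath>
    #include <cassert>

    int main() {
      double log_ref_z = std::log(0.0);  // log(0) yields -inf
      assert(std::isinf(log_ref_z));     // qualified call: portable with <cmath>
      assert(!std::isnan(log_ref_z));    // -inf is infinite, not NaN
      return 0;
    }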
diff --git a/training/model1.cc b/training/fast_align.cc
index 19692b9a..7492d26f 100644
--- a/training/model1.cc
+++ b/training/fast_align.cc
@@ -17,18 +17,21 @@ using namespace std;
bool InitCommandLine(int argc, char** argv, po::variables_map* conf) {
po::options_description opts("Configuration options");
opts.add_options()
- ("iterations,i",po::value<unsigned>()->default_value(5),"Number of iterations of EM training")
- ("beam_threshold,t",po::value<double>()->default_value(-4),"log_10 of beam threshold (-10000 to include everything, 0 max)")
- ("bidir,b", "Run bidirectional alignment")
- ("no_null_word,N","Do not generate from the null token")
- ("write_alignments,A", "Write alignments instead of parameters")
+ ("input,i",po::value<string>(),"Parallel corpus input file")
+ ("reverse,r","Reverse estimation (swap source and target during training)")
+ ("iterations,I",po::value<unsigned>()->default_value(5),"Number of iterations of EM training")
+ //("bidir,b", "Run bidirectional alignment")
("favor_diagonal,d", "Use a static alignment distribution that assigns higher probabilities to alignments near the diagonal")
- ("diagonal_tension,T", po::value<double>()->default_value(4.0), "How sharp or flat around the diagonal is the alignment distribution (<1 = flat >1 = sharp)")
("prob_align_null", po::value<double>()->default_value(0.08), "When --favor_diagonal is set, what's the probability of a null alignment?")
- ("variational_bayes,v","Add a symmetric Dirichlet prior and infer VB estimate of weights")
- ("testset,x", po::value<string>(), "After training completes, compute the log likelihood of this set of sentence pairs under the learned model")
+ ("diagonal_tension,T", po::value<double>()->default_value(4.0), "How sharp or flat around the diagonal is the alignment distribution (<1 = flat >1 = sharp)")
+ ("variational_bayes,v","Infer VB estimate of parameters under a symmetric Dirichlet prior")
("alpha,a", po::value<double>()->default_value(0.01), "Hyperparameter for optional Dirichlet prior")
- ("no_add_viterbi,V","Do not add Viterbi alignment points (may generate a grammar where some training sentence pairs are unreachable)");
+ ("no_null_word,N","Do not generate from a null token")
+ ("output_parameters,p", "Write model parameters instead of alignments")
+ ("beam_threshold,t",po::value<double>()->default_value(-4),"When writing parameters, log_10 of beam threshold for writing parameter (-10000 to include everything, 0 max parameter only)")
+ ("hide_training_alignments,H", "Hide training alignments (only useful if you want to use -x option and just compute testset statistics)")
+ ("testset,x", po::value<string>(), "After training completes, compute the log likelihood of this set of sentence pairs under the learned model")
+ ("no_add_viterbi,V","When writing model parameters, do not add Viterbi alignment points (may generate a grammar where some training sentence pairs are unreachable)");
po::options_description clo("Command line options");
clo.add_options()
("config", po::value<string>(), "Configuration file")
@@ -44,36 +47,29 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) {
}
po::notify(*conf);
- if (argc < 2 || conf->count("help")) {
- cerr << "Usage " << argv[0] << " [OPTIONS] corpus.fr-en\n";
+ if (conf->count("help") || conf->count("input") == 0) {
+ cerr << "Usage " << argv[0] << " [OPTIONS] -i corpus.fr-en\n";
cerr << dcmdline_options << endl;
return false;
}
return true;
}
-// src and trg are source and target strings, respectively (not really lattices)
-double PosteriorInference(const vector<WordID>& src, const vector<WordID>& trg) {
- double llh = 0;
- static vector<double> unnormed_a_i;
- if (src.size() > unnormed_a_i.size())
- unnormed_a_i.resize(src.size());
- return llh;
-}
-
int main(int argc, char** argv) {
po::variables_map conf;
if (!InitCommandLine(argc, argv, &conf)) return 1;
- const string fname = argv[argc - 1];
+ const string fname = conf["input"].as<string>();
+ const bool reverse = conf.count("reverse") > 0;
const int ITERATIONS = conf["iterations"].as<unsigned>();
const double BEAM_THRESHOLD = pow(10.0, conf["beam_threshold"].as<double>());
const bool use_null = (conf.count("no_null_word") == 0);
const WordID kNULL = TD::Convert("<eps>");
const bool add_viterbi = (conf.count("no_add_viterbi") == 0);
const bool variational_bayes = (conf.count("variational_bayes") > 0);
- const bool write_alignments = (conf.count("write_alignments") > 0);
+ const bool write_alignments = (conf.count("output_parameters") == 0);
const double diagonal_tension = conf["diagonal_tension"].as<double>();
const double prob_align_null = conf["prob_align_null"].as<double>();
+ const bool hide_training_alignments = (conf.count("hide_training_alignments") > 0);
string testset;
if (conf.count("testset")) testset = conf["testset"].as<string>();
const double prob_align_not_null = 1.0 - prob_align_null;
@@ -100,14 +96,16 @@ int main(int argc, char** argv) {
bool flag = false;
string line;
string ssrc, strg;
+ vector<WordID> src, trg;
while(true) {
getline(in, line);
if (!in) break;
++lc;
if (lc % 1000 == 0) { cerr << '.'; flag = true; }
if (lc %50000 == 0) { cerr << " [" << lc << "]\n" << flush; flag = false; }
- vector<WordID> src, trg;
+ src.clear(); trg.clear();
CorpusTools::ReadLine(line, &src, &trg);
+ if (reverse) swap(src, trg);
if (src.size() == 0 || trg.size() == 0) {
cerr << "Error: " << lc << "\n" << line << endl;
return 1;
@@ -160,10 +158,13 @@ int main(int argc, char** argv) {
max_i = src[i-1];
}
}
- if (write_alignments) {
+ if (!hide_training_alignments && write_alignments) {
if (max_index > 0) {
if (first_al) first_al = false; else cout << ' ';
- cout << (max_index - 1) << "-" << j;
+ if (reverse)
+ cout << j << '-' << (max_index - 1);
+ else
+ cout << (max_index - 1) << '-' << j;
}
}
s2t_viterbi[max_i][f_j] = 1.0;
@@ -176,7 +177,7 @@ int main(int argc, char** argv) {
}
likelihood += log(sum);
}
- if (write_alignments && final_iteration) cout << endl;
+ if (write_alignments && final_iteration && !hide_training_alignments) cout << endl;
}
// log(e) = 1.0
@@ -203,11 +204,13 @@ int main(int argc, char** argv) {
istream& in = *rf.stream();
int lc = 0;
double tlp = 0;
- string ssrc, strg, line;
+ string line;
while (getline(in, line)) {
++lc;
vector<WordID> src, trg;
CorpusTools::ReadLine(line, &src, &trg);
+ cout << TD::GetString(src) << " ||| " << TD::GetString(trg) << " |||";
+ if (reverse) swap(src, trg);
double log_prob = Md::log_poisson(trg.size(), 0.05 + src.size() * mean_srclen_multiplier);
if (src.size() > unnormed_a_i.size())
unnormed_a_i.resize(src.size());
@@ -216,11 +219,14 @@ int main(int argc, char** argv) {
for (int j = 0; j < trg.size(); ++j) {
const WordID& f_j = trg[j];
double sum = 0;
+ int a_j = 0;
+ double max_pat = 0;
const double j_over_ts = double(j) / trg.size();
double prob_a_i = 1.0 / (src.size() + use_null); // uniform (model 1)
if (use_null) {
if (favor_diagonal) prob_a_i = prob_align_null;
- sum += s2t.prob(kNULL, f_j) * prob_a_i;
+ max_pat = s2t.prob(kNULL, f_j) * prob_a_i;
+ sum += max_pat;
}
double az = 0;
if (favor_diagonal) {
@@ -233,13 +239,24 @@ int main(int argc, char** argv) {
for (int i = 1; i <= src.size(); ++i) {
if (favor_diagonal)
prob_a_i = unnormed_a_i[i-1] / az;
- sum += s2t.prob(src[i-1], f_j) * prob_a_i;
+ double pat = s2t.prob(src[i-1], f_j) * prob_a_i;
+ if (pat > max_pat) { max_pat = pat; a_j = i; }
+ sum += pat;
}
log_prob += log(sum);
+ if (write_alignments) {
+ if (a_j > 0) {
+ cout << ' ';
+ if (reverse)
+ cout << j << '-' << (a_j - 1);
+ else
+ cout << (a_j - 1) << '-' << j;
+ }
+ }
}
tlp += log_prob;
- cerr << ssrc << " ||| " << strg << " ||| " << log_prob << endl;
- }
+ cout << " ||| " << log_prob << endl << flush;
+ } // loop over test set sentences
cerr << "TOTAL LOG PROB " << tlp << endl;
}
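The testset-scoring hunks above now track the most probable source position a_j for each target word while summing the sentence likelihood, then print the link in source-target order, flipped when --reverse swapped the sides at load time. Below is a stripped-down sketch of that argmax-and-print step; the scoring function pat() is a hypothetical stand-in for s2t.prob(src[i-1], f_j) * prob_a_i and is not the commit's code:

    #include <iostream>

    // Hypothetical stand-in for the translation-times-alignment score.
    double pat(int i, int j) { return 1.0 / (1 + i + j); }

    void print_viterbi_link(int j, int src_len, bool reverse) {
      int a_j = 0;         // 0 denotes the null word
      double max_pat = 0;  // best score seen so far for target word j
      for (int i = 1; i <= src_len; ++i) {
        double p = pat(i, j);
        if (p > max_pat) { max_pat = p; a_j = i; }
      }
      if (a_j > 0) {       // null-aligned words produce no link
        std::cout << ' ';
        if (reverse) std::cout << j << '-' << (a_j - 1);
        else         std::cout << (a_j - 1) << '-' << j;
      }
    }

    int main() {
      for (int j = 0; j < 3; ++j) print_viterbi_link(j, 5, false);
      std::cout << '\n';
      return 0;
    }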
diff --git a/training/liblbfgs/Jamfile b/training/liblbfgs/Jamfile
deleted file mode 100644
index 49c82748..00000000
--- a/training/liblbfgs/Jamfile
+++ /dev/null
@@ -1,5 +0,0 @@
-import testing ;
-
-lib liblbfgs : lbfgs.c : <include>.. ;
-
-unit-test ll_test : ll_test.cc liblbfgs : <include>.. ;
diff --git a/training/mpi_batch_optimize.cc b/training/mpi_batch_optimize.cc
index 6432f4a2..2eff07e4 100644
--- a/training/mpi_batch_optimize.cc
+++ b/training/mpi_batch_optimize.cc
@@ -142,7 +142,7 @@ struct TrainingObserver : public DecoderObserver {
cerr << "DIFF. ERR! log_model_z < log_ref_z: " << cur_obj << " " << log_ref_z << endl;
exit(1);
}
- assert(!isnan(log_ref_z));
+ assert(!std::isnan(log_ref_z));
ref_exp -= cur_model_exp;
acc_grad -= ref_exp;
acc_obj += (cur_obj - log_ref_z);
diff --git a/training/mpi_online_optimize.cc b/training/mpi_online_optimize.cc
index 993627f0..d6968848 100644
--- a/training/mpi_online_optimize.cc
+++ b/training/mpi_online_optimize.cc
@@ -143,7 +143,7 @@ struct TrainingObserver : public DecoderObserver {
cerr << "DIFF. ERR! log_model_z < log_ref_z: " << cur_obj << " " << log_ref_z << endl;
exit(1);
}
- assert(!isnan(log_ref_z));
+ assert(!std::isnan(log_ref_z));
ref_exp -= cur_model_exp;
acc_grad += ref_exp;
acc_obj += (cur_obj - log_ref_z);
@@ -330,7 +330,7 @@ int main(int argc, char** argv) {
if (rank == 0) {
converged = (iter == max_iteration);
Weights::SanityCheck(lambdas);
- Weights::ShowLargestFeatures(lambdas);
+ static int cc = 0; ++cc; if (cc > 1) { Weights::ShowLargestFeatures(lambdas); }
string fname = "weights.cur.gz";
if (iter % write_weights_every_ith == 0) {
ostringstream o; o << "weights.epoch_" << (ai+1) << '.' << iter << ".gz";
diff --git a/training/mr_optimize_reduce.cc b/training/mr_optimize_reduce.cc
index 461e6b5f..d490192f 100644
--- a/training/mr_optimize_reduce.cc
+++ b/training/mr_optimize_reduce.cc
@@ -19,8 +19,8 @@ namespace po = boost::program_options;
void SanityCheck(const vector<double>& w) {
for (int i = 0; i < w.size(); ++i) {
- assert(!isnan(w[i]));
- assert(!isinf(w[i]));
+ assert(!std::isnan(w[i]));
+ assert(!std::isinf(w[i]));
}
}