From 33d4601da5e2e715260619a38f5899645d157952 Mon Sep 17 00:00:00 2001
From: Chris Dyer
Date: Tue, 8 May 2012 19:45:10 -0400
Subject: switch to liblbfgs for pro

---
 pro-train/mr_pro_reduce.cc | 89 +++++++++++++++++++++-------------------
 1 file changed, 40 insertions(+), 49 deletions(-)

diff --git a/pro-train/mr_pro_reduce.cc b/pro-train/mr_pro_reduce.cc
index 6362ce47..d3fb8026 100644
--- a/pro-train/mr_pro_reduce.cc
+++ b/pro-train/mr_pro_reduce.cc
@@ -11,6 +11,7 @@
 #include "weights.h"
 #include "sparse_vector.h"
 #include "optimize.h"
+#include "liblbfgs/lbfgs++.h"
 
 using namespace std;
 namespace po = boost::program_options;
@@ -89,10 +90,10 @@ void ReadCorpus(istream* pin, vector<pair<bool, SparseVector<weight_t> > >* corp
   if (flag) cerr << endl;
 }
 
-void GradAdd(const SparseVector<weight_t>& v, const double scale, vector<weight_t>* acc) {
+void GradAdd(const SparseVector<weight_t>& v, const double scale, weight_t* acc) {
   for (SparseVector<weight_t>::const_iterator it = v.begin();
        it != v.end(); ++it) {
-    (*acc)[it->first] += it->second * scale;
+    acc[it->first] += it->second * scale;
   }
 }
 
@@ -100,26 +101,24 @@ double ApplyRegularizationTerms(const double C,
                                 const double T,
                                 const vector<weight_t>& weights,
                                 const vector<weight_t>& prev_weights,
-                                vector<weight_t>* g) {
-  assert(weights.size() == g->size());
+                                weight_t* g) {
   double reg = 0;
   for (size_t i = 0; i < weights.size(); ++i) {
     const double prev_w_i = (i < prev_weights.size() ? prev_weights[i] : 0.0);
     const double& w_i = weights[i];
-    double& g_i = (*g)[i];
     reg += C * w_i * w_i;
-    g_i += 2 * C * w_i;
+    g[i] += 2 * C * w_i;
     const double diff_i = w_i - prev_w_i;
     reg += T * diff_i * diff_i;
-    g_i += 2 * T * diff_i;
+    g[i] += 2 * T * diff_i;
   }
   return reg;
 }
 
 double TrainingInference(const vector<weight_t>& x,
                          const vector<pair<bool, SparseVector<weight_t> > >& corpus,
-                         vector<weight_t>* g = NULL) {
+                         weight_t* g = NULL) {
   double cll = 0;
   for (int i = 0; i < corpus.size(); ++i) {
     const double dotprod = corpus[i].second.dot(x) + (x.size() ?
                                                      x[0] : weight_t()); // x[0] is bias
@@ -139,20 +138,44 @@ double TrainingInference(const vector<weight_t>& x,
       if (g) {
         // g -= corpus[i].second * exp(lp_false);
         GradAdd(corpus[i].second, -exp(lp_false), g);
-        (*g)[0] -= exp(lp_false); // bias
+        g[0] -= exp(lp_false); // bias
       }
     } else {                  // false label
       cll -= lp_false;
       if (g) {
         // g += corpus[i].second * exp(lp_true);
         GradAdd(corpus[i].second, exp(lp_true), g);
-        (*g)[0] += exp(lp_true); // bias
+        g[0] += exp(lp_true); // bias
       }
     }
   }
   return cll;
 }
 
+struct ProLoss {
+  ProLoss(const vector<pair<bool, SparseVector<weight_t> > >& tr,
+          const vector<pair<bool, SparseVector<weight_t> > >& te,
+          const double c,
+          const double t,
+          const vector<weight_t>& px) : training(tr), testing(te), C(c), T(t), prev_x(px){}
+  double operator()(const vector<double>& x, double* g) const {
+    fill(g, g + x.size(), 0.0);
+    double cll = TrainingInference(x, training, g);
+    tppl = 0;
+    if (testing.size())
+      tppl = pow(2.0, TrainingInference(x, testing, g) / (log(2) * testing.size()));
+    double ppl = cll / log(2);
+    ppl /= training.size();
+    ppl = pow(2.0, ppl);
+    double reg = ApplyRegularizationTerms(C, T, x, prev_x, g);
+    return cll + reg;
+  }
+  const vector<pair<bool, SparseVector<weight_t> > >& training, testing;
+  const double C, T;
+  const vector<weight_t>& prev_x;
+  mutable double tppl;
+};
+
 // return held-out log likelihood
 double LearnParameters(const vector<pair<bool, SparseVector<weight_t> > >& training,
                        const vector<pair<bool, SparseVector<weight_t> > >& testing,
@@ -161,42 +184,10 @@ double LearnParameters(const vector<pair<bool, SparseVector<weight_t> > >& train
                        const unsigned memory_buffers,
                        const vector<weight_t>& prev_x,
                        vector<weight_t>* px) {
-  vector<weight_t>& x = *px;
-  vector<weight_t> vg(FD::NumFeats(), 0.0);
-  bool converged = false;
-  LBFGSOptimizer opt(FD::NumFeats(), memory_buffers);
-  double tppl = 0.0;
-  while(!converged) {
-    fill(vg.begin(), vg.end(), 0.0);
-    double cll = TrainingInference(x, training, &vg);
-    double ppl = cll / log(2);
-    ppl /= training.size();
-    ppl = pow(2.0, ppl);
-
-    // evaluate optional held-out test set
-    if (testing.size()) {
-      tppl = TrainingInference(x, testing) / log(2);
-      tppl /= testing.size();
-      tppl = pow(2.0, tppl);
-    }
-
-    // handle regularizer
-    double reg = ApplyRegularizationTerms(C, T, x, prev_x, &vg);
-    cll += reg;
-    cerr << cll << " (REG=" << reg << ")\tPPL=" << ppl << "\t TEST_PPL=" << tppl << "\t" << endl;
-    try {
-      opt.Optimize(cll, vg, &x);
-      converged = opt.HasConverged();
-    } catch (...) {
-      cerr << "Exception caught, assuming convergence is close enough...\n";
-      converged = true;
-    }
-    if (fabs(x[0]) > MAX_BIAS) {
-      cerr << "Biased model learned. Are your training instances wrong?\n";
-      cerr << "  BIAS: " << x[0] << endl;
-    }
-  }
-  return tppl;
+  ProLoss loss(training, testing, C, T, prev_x);
+  LBFGS<ProLoss> lbfgs(px, loss, 0.0, memory_buffers);
+  lbfgs.MinimizeFunction();
+  return loss.tppl;
 }
 
 int main(int argc, char** argv) {
@@ -213,9 +204,9 @@ int main(int argc, char** argv) {
   const double max_reg = conf["max_reg"].as<double>();
   double C = conf["regularization_strength"].as<double>(); // will be overridden if parameter is tuned
   const double T = conf["regularize_to_weights"].as<double>();
-  assert(C > 0.0);
-  assert(min_reg > 0.0);
-  assert(max_reg > 0.0);
+  assert(C >= 0.0);
+  assert(min_reg >= 0.0);
+  assert(max_reg >= 0.0);
   assert(max_reg > min_reg);
   const double psi = conf["interpolate_with_weights"].as<double>();
   if (psi < 0.0 || psi > 1.0) { cerr << "Invalid interpolation weight: " << psi << endl; return 1; }
--
cgit v1.2.3
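Note on the new optimizer interface (not part of the commit): after this patch, LearnParameters builds a ProLoss functor and hands it to the LBFGS wrapper from liblbfgs/lbfgs++.h, which repeatedly calls operator()(x, g) to obtain the objective value and gradient and updates the weight vector in place. The toy sketch below illustrates that calling pattern with a simple quadratic; the LBFGS<...> template parameter and the meaning of the constructor arguments (the 0.0 and the memory-buffer count) are assumptions copied from the call site in this patch, not documented API, and ToyLoss is a hypothetical example.

// Sketch only: a minimal functor minimized with the same wrapper pattern
// LearnParameters uses above. Objective: f(x) = (x0 - 3)^2 + (x1 + 1)^2.
#include <vector>
#include "liblbfgs/lbfgs++.h"
using namespace std;

struct ToyLoss {
  // Return the objective at x and write its gradient into g,
  // the same contract ProLoss follows.
  double operator()(const vector<double>& x, double* g) const {
    const double d0 = x[0] - 3.0;
    const double d1 = x[1] + 1.0;
    g[0] = 2.0 * d0;
    g[1] = 2.0 * d1;
    return d0 * d0 + d1 * d1;
  }
};

int main() {
  vector<double> x(2, 0.0);                 // start at the origin; optimized in place
  ToyLoss loss;
  LBFGS<ToyLoss> lbfgs(&x, loss, 0.0, 10);  // 0.0 and 10 mirror the patch's call
                                            // (assumed: regularization strength, memory buffers)
  lbfgs.MinimizeFunction();
  // x should now be near (3, -1).
  return 0;
}

Making the objective a const functor with a mutable tppl member is what lets LearnParameters recover the held-out perplexity with loss.tppl after MinimizeFunction() returns.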