From 716356a5de1eb7d066d511af51b76e1294609d87 Mon Sep 17 00:00:00 2001
From: redpony <redpony@ec762483-ff6d-05da-a07a-a48fb63a330f>
Date: Wed, 22 Sep 2010 18:15:39 +0000
Subject: mpi support for training

git-svn-id: https://ws10smt.googlecode.com/svn/trunk@654 ec762483-ff6d-05da-a07a-a48fb63a330f
---
 configure.ac                    |   6 +
 decoder/translator.cc           |   4 +-
 gi/pipeline/blacklight.config   |   9 ++
 training/Makefile.am            |  14 ++
 training/mpi_batch_optimize.cc  | 326 ++++++++++++++++++++++++++++++++++++++++
 training/mpi_online_optimize.cc | 322 +++++++++++++++++++++++++++++++++++++++
 training/optimize.cc            |   1 -
 training/optimize.h             |   1 +
 8 files changed, 681 insertions(+), 2 deletions(-)
 create mode 100644 gi/pipeline/blacklight.config
 create mode 100644 training/mpi_batch_optimize.cc
 create mode 100644 training/mpi_online_optimize.cc

diff --git a/configure.ac b/configure.ac
index 302eebed..ab8c93a1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -25,6 +25,12 @@ AC_CHECK_HEADER(google/dense_hash_map,
 AC_PROG_INSTALL
 GTEST_LIB_CHECK
 
+AC_ARG_ENABLE(mpi,
+            [  --enable-mpi   Build MPI binaries, assumes mpi.h is present ],
+            [ mpi=yes
+            ])
+AM_CONDITIONAL([MPI], [test "x$mpi" = xyes])
+
 AM_CONDITIONAL([SRI_LM], false)
 AC_ARG_WITH(srilm,
             [AC_HELP_STRING([--with-srilm=PATH], [(optional) path to SRI's LM toolkit])],
diff --git a/decoder/translator.cc b/decoder/translator.cc
index e6c282e1..277c3a2d 100644
--- a/decoder/translator.cc
+++ b/decoder/translator.cc
@@ -3,6 +3,8 @@
 #include <iostream>
 #include <vector>
 
+#include "verbose.h"
+
 using namespace std;
 
 Translator::~Translator() {}
@@ -42,7 +44,7 @@ void Translator::SentenceComplete() {
 // metadata
 void Translator::ProcessMarkupHintsImpl(const map<string, string>& kv) {
   int unprocessed = kv.size() - kv.count("id");
-  cerr << "Inside translator process hints\n";
+  if (!SILENT) cerr << "Inside translator process hints\n";
   if (unprocessed > 0) {
     cerr << "Sentence markup contains unprocessed data:\n";
    for (map<string, string>::const_iterator it = kv.begin(); it != kv.end(); ++it) {
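Nothing in the patch shows the new configure flag in use; a plausible build sequence, assuming a standard autotools checkout with the MPI headers and libraries already on the search paths (autoreconf and the mpicxx wrapper are assumptions, not part of the patch):

    autoreconf -i                        # regenerate configure after the configure.ac change
    ./configure --enable-mpi CXX=mpicxx
    make                                 # builds the MPI trainers added in training/Makefile.am below
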
diff --git a/gi/pipeline/blacklight.config b/gi/pipeline/blacklight.config
new file mode 100644
index 00000000..fc59a604
--- /dev/null
+++ b/gi/pipeline/blacklight.config
@@ -0,0 +1,9 @@
+# THIS FILE GIVES THE LOCATIONS OF THE CORPORA USED
+# name path aligned-corpus LM dev dev-refs test1 test1-eval.sh ...
+/usr/users/0/cdyer/ws10smt/data
+btec /home/cdyer/ws10smt-data/btec/ split.zh-en.al lm/en.3gram.lm.gz devtest/devset1_2.zh devtest/devset1_2.lc.en* devtest/devset3.zh eval-devset3.sh
+zhen /home/cdyer/ws10smt-data/chinese-english corpus.zh-en.al lm/c2e.3gram.lm.gz dev_and_test/mt02.src.txt dev_and_test/mt02.ref.* dev_and_test/mt03.src.txt eval-mt03.sh
+aren /home/cdyer/ws10smt-data/arabic-english corpus.ar-en-al lm/a2e.3gram.lm.gz dev_and_test/dev.src.txt dev_and_test/dev.ref.txt.* dev_and_test/mt05.src.txt eval-mt05.sh
+uren /usr/users/0/cdyer/ws10smt/data/urdu-english corpus.ur-en.al lm/u2e.en.lm.gz dev/dev.ur dev/dev.en* devtest/devtest.ur eval-devtest.sh
+nlfr /home/cdyer/ws10smt-data/dutch-french corpus.nl-fr.al
+
diff --git a/training/Makefile.am b/training/Makefile.am
index a947e4a5..ea637d9e 100644
--- a/training/Makefile.am
+++ b/training/Makefile.am
@@ -14,6 +14,20 @@ noinst_PROGRAMS = \
   lbfgs_test \
   optimize_test
 
+TESTS = lbfgs_test optimize_test
+
+
+if MPI
+bin_PROGRAMS += mpi_batch_optimize \
+                mpi_online_optimize
+
+mpi_batch_optimize_SOURCES = mpi_batch_optimize.cc optimize.cc
+mpi_batch_optimize_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a -lz -lmpi++ -lmpi
+
+mpi_online_optimize_SOURCES = mpi_online_optimize.cc online_optimizer.cc
+mpi_online_optimize_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a -lz -lmpi++ -lmpi
+endif
+
 online_train_SOURCES = online_train.cc online_optimizer.cc
 online_train_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
 
diff --git a/training/mpi_batch_optimize.cc b/training/mpi_batch_optimize.cc
new file mode 100644
index 00000000..7953513e
--- /dev/null
+++ b/training/mpi_batch_optimize.cc
@@ -0,0 +1,326 @@
+#include <sstream>
+#include <iostream>
+#include <fstream>
+#include <vector>
+#include <cassert>
+#include <cmath>
+
+#include <boost/shared_ptr.hpp>
+#include <boost/program_options.hpp>
+#include <boost/program_options/variables_map.hpp>
+#include <mpi.h>
+
+#include "verbose.h"
+#include "hg.h"
+#include "prob.h"
+#include "inside_outside.h"
+#include "ff_register.h"
+#include "decoder.h"
+#include "filelib.h"
+#include "optimize.h"
+#include "fdict.h"
+#include "weights.h"
+#include "sparse_vector.h"
+
+using namespace std;
+using boost::shared_ptr;
+namespace po = boost::program_options;
+
+void SanityCheck(const vector<double>& w) {
+  for (int i = 0; i < w.size(); ++i) {
+    assert(!isnan(w[i]));
+    assert(!isinf(w[i]));
+  }
+}
+
+struct FComp {
+  const vector<double>& w_;
+  FComp(const vector<double>& w) : w_(w) {}
+  bool operator()(int a, int b) const {
+    return fabs(w_[a]) > fabs(w_[b]);
+  }
+};
+
+void ShowLargestFeatures(const vector<double>& w) {
+  vector<int> fnums(w.size());
+  for (int i = 0; i < w.size(); ++i)
+    fnums[i] = i;
+  vector<int>::iterator mid = fnums.begin();
+  mid += (w.size() > 10 ? 10 : w.size());
+  partial_sort(fnums.begin(), mid, fnums.end(), FComp(w));
+  cerr << "TOP FEATURES:";
+  for (vector<int>::iterator i = fnums.begin(); i != mid; ++i) {
+    cerr << ' ' << FD::Convert(*i) << '=' << w[*i];
+  }
+  cerr << endl;
+}
+
+void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
+  po::options_description opts("Configuration options");
+  opts.add_options()
+        ("input_weights,w",po::value<string>(),"Input feature weights file")
+        ("training_data,t",po::value<string>(),"Training data")
+        ("decoder_config,d",po::value<string>(),"Decoder configuration file")
+        ("output_weights,o",po::value<string>()->default_value("-"),"Output feature weights file")
+        ("optimization_method,m", po::value<string>()->default_value("lbfgs"), "Optimization method (sgd, lbfgs, rprop)")
+        ("correction_buffers,M", po::value<int>()->default_value(10), "Number of gradients for LBFGS to maintain in memory")
+        ("gaussian_prior,p","Use a Gaussian prior on the weights")
+        ("means,u", po::value<string>(), "File containing the means for Gaussian prior")
+        ("sigma_squared", po::value<double>()->default_value(1.0), "Sigma squared term for spherical Gaussian prior");
+  po::options_description clo("Command line options");
+  clo.add_options()
+        ("config", po::value<string>(), "Configuration file")
+        ("help,h", "Print this help message and exit");
+  po::options_description dconfig_options, dcmdline_options;
+  dconfig_options.add(opts);
+  dcmdline_options.add(opts).add(clo);
+
+  po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
+  if (conf->count("config")) {
+    ifstream config((*conf)["config"].as<string>().c_str());
+    po::store(po::parse_config_file(config, dconfig_options), *conf);
+  }
+  po::notify(*conf);
+
+  if (conf->count("help") || !conf->count("input_weights") || !conf->count("training_data") || !conf->count("decoder_config")) {
+    cerr << dcmdline_options << endl;
+    MPI::Finalize();
+    exit(1);
+  }
+}
+
+void ReadTrainingCorpus(const string& fname, int rank, int size, vector<string>* c) {
+  ReadFile rf(fname);
+  istream& in = *rf.stream();
+  string line;
+  int lc = 0;
+  while(in) {
+    getline(in, line);
+    if (!in) break;
+    if (lc % size == rank) c->push_back(line);
+    ++lc;
+  }
+}
+
+static const double kMINUS_EPSILON = -1e-6;
+
+struct TrainingObserver : public DecoderObserver {
+  void Reset() {
+    acc_grad.clear();
+    acc_obj = 0;
+    total_complete = 0;
+  }
+
+  void SetLocalGradientAndObjective(vector<double>* g, double* o) const {
+    *o = acc_obj;
+    for (SparseVector<prob_t>::const_iterator it = acc_grad.begin(); it != acc_grad.end(); ++it)
+      (*g)[it->first] = it->second;
+  }
+
+  virtual void NotifyDecodingStart(const SentenceMetadata& smeta) {
+    cur_model_exp.clear();
+    cur_obj = 0;
+    state = 1;
+  }
+
+  // compute model expectations, denominator of objective
+  virtual void NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) {
+    assert(state == 1);
+    state = 2;
+    const prob_t z = InsideOutside<prob_t, EdgeProb, SparseVector<prob_t>,
+                                   EdgeFeaturesAndProbWeightFunction>(*hg, &cur_model_exp);
+    cur_obj = log(z);
+    cur_model_exp /= z;
+  }
+
+  // compute "empirical" expectations, numerator of objective
+  virtual void NotifyAlignmentForest(const SentenceMetadata& smeta, Hypergraph* hg) {
+    assert(state == 2);
+    state = 3;
+    SparseVector<prob_t> ref_exp;
+    const prob_t ref_z = InsideOutside<prob_t, EdgeProb, SparseVector<prob_t>,
+                                       EdgeFeaturesAndProbWeightFunction>(*hg, &ref_exp);
+    ref_exp /= ref_z;
+
+    double log_ref_z;
+#if 0
+    if (crf_uniform_empirical) {
+      log_ref_z = ref_exp.dot(feature_weights);
+    } else {
+      log_ref_z = log(ref_z);
+    }
+#else
+    log_ref_z = log(ref_z);
+#endif
+
+    // rounding errors mean that <0 is too strict
+    if ((cur_obj - log_ref_z) < kMINUS_EPSILON) {
+      cerr << "DIFF. ERR! log_model_z < log_ref_z: " << cur_obj << " " << log_ref_z << endl;
+      exit(1);
+    }
+    assert(!isnan(log_ref_z));
+    ref_exp -= cur_model_exp;
+    acc_grad -= ref_exp;
+    acc_obj += (cur_obj - log_ref_z);
+  }
+
+  virtual void NotifyDecodingComplete(const SentenceMetadata& smeta) {
+    if (state == 3) {
+      ++total_complete;
+    } else {
+    }
+  }
+
+  int total_complete;
+  SparseVector<prob_t> cur_model_exp;
+  SparseVector<prob_t> acc_grad;
+  double acc_obj;
+  double cur_obj;
+  int state;
+};
+
+int main(int argc, char** argv) {
+  MPI::Init(argc, argv);
+  const int size = MPI::COMM_WORLD.Get_size();
+  const int rank = MPI::COMM_WORLD.Get_rank();
+  SetSilent(true);  // turn off verbose decoder output
+  cerr << "MPI: I am " << rank << '/' << size << endl;
+  register_feature_functions();
+
+  po::variables_map conf;
+  InitCommandLine(argc, argv, &conf);
+
+  // load initial weights
+  Weights weights;
+  weights.InitFromFile(conf["input_weights"].as<string>());
+
+  // freeze feature set (should be optional?)
+  const bool freeze_feature_set = true;
+  if (freeze_feature_set) FD::Freeze();
+
+  // load cdec.ini and set up decoder
+  ReadFile ini_rf(conf["decoder_config"].as<string>());
+  Decoder decoder(ini_rf.stream());
+  if (decoder.GetConf()["input"].as<string>() != "-") {
+    cerr << "cdec.ini must not set an input file\n";
+    MPI::COMM_WORLD.Abort(1);
+  }
+
+  const int num_feats = FD::NumFeats();
+  if (rank == 0) cerr << "Number of features: " << num_feats << endl;
+  const bool gaussian_prior = conf.count("gaussian_prior");
+  vector<double> means(num_feats, 0);
+  if (conf.count("means")) {
+    if (!gaussian_prior) {
+      cerr << "Don't use --means without --gaussian_prior!\n";
+      exit(1);
+    }
+    Weights wm;
+    wm.InitFromFile(conf["means"].as<string>());
+    if (num_feats != FD::NumFeats()) {
+      cerr << "[ERROR] Means file had unexpected features!\n";
+      exit(1);
+    }
+    wm.InitVector(&means);
+  }
+  shared_ptr<BatchOptimizer> o;
+  if (rank == 0) {
+    const string omethod = conf["optimization_method"].as<string>();
+    if (omethod == "rprop")
+      o.reset(new RPropOptimizer(num_feats));  // TODO add configuration
+    else
+      o.reset(new LBFGSOptimizer(num_feats, conf["correction_buffers"].as<int>()));
+    cerr << "Optimizer: " << o->Name() << endl;
+  }
+  double objective = 0;
+  vector<double> lambdas(num_feats, 0.0);
+  weights.InitVector(&lambdas);
+  if (lambdas.size() != num_feats) {
+    cerr << "Initial weights file did not have all features specified!\n  feats="
+         << num_feats << "\n  weights file=" << lambdas.size() << endl;
+    lambdas.resize(num_feats, 0.0);
+  }
+  vector<double> gradient(num_feats, 0.0);
+  vector<double> rcv_grad(num_feats, 0.0);
+  bool converged = false;
+  vector<string> corpus;
+  ReadTrainingCorpus(conf["training_data"].as<string>(), rank, size, &corpus);
+  assert(corpus.size() > 0);
+
+  TrainingObserver observer;
+  while (!converged) {
+    observer.Reset();
+    if (rank == 0) {
+      cerr << "Starting decoding... (~" << corpus.size() << " sentences / proc)\n";
(~" << corpus.size() << " sentences / proc)\n"; + } + decoder.SetWeights(lambdas); + for (int i = 0; i < corpus.size(); ++i) + decoder.Decode(corpus[i], &observer); + + fill(gradient.begin(), gradient.end(), 0); + fill(rcv_grad.begin(), rcv_grad.end(), 0); + observer.SetLocalGradientAndObjective(&gradient, &objective); + + double to = 0; + MPI::COMM_WORLD.Reduce(const_cast(&gradient.data()[0]), &rcv_grad[0], num_feats, MPI::DOUBLE, MPI::SUM, 0); + MPI::COMM_WORLD.Reduce(&objective, &to, 1, MPI::DOUBLE, MPI::SUM, 0); + swap(gradient, rcv_grad); + objective = to; + + if (rank == 0) { // run optimizer only on rank=0 node + if (gaussian_prior) { + const double sigsq = conf["sigma_squared"].as(); + double norm = 0; + for (int k = 1; k < lambdas.size(); ++k) { + const double& lambda_k = lambdas[k]; + if (lambda_k) { + const double param = (lambda_k - means[k]); + norm += param * param; + gradient[k] += param / sigsq; + } + } + const double reg = norm / (2.0 * sigsq); + cerr << "REGULARIZATION TERM: " << reg << endl; + objective += reg; + } + cerr << "EVALUATION #" << o->EvaluationCount() << " OBJECTIVE: " << objective << endl; + double gnorm = 0; + for (int i = 0; i < gradient.size(); ++i) + gnorm += gradient[i] * gradient[i]; + cerr << " GNORM=" << sqrt(gnorm) << endl; + vector old = lambdas; + int c = 0; + while (old == lambdas) { + ++c; + if (c > 1) { cerr << "Same lambdas, repeating optimization\n"; } + o->Optimize(objective, gradient, &lambdas); + assert(c < 5); + } + old.clear(); + SanityCheck(lambdas); + ShowLargestFeatures(lambdas); + weights.InitFromVector(lambdas); + + converged = o->HasConverged(); + if (converged) { cerr << "OPTIMIZER REPORTS CONVERGENCE!\n"; } + + string fname = "weights.cur.gz"; + if (converged) { fname = "weights.final.gz"; } + ostringstream vv; + vv << "Objective = " << objective << " (eval count=" << o->EvaluationCount() << ")"; + const string svv = vv.str(); + weights.WriteToFile(fname, true, &svv); + } // rank == 0 + int cint = converged; + MPI::COMM_WORLD.Bcast(const_cast(&lambdas.data()[0]), num_feats, MPI::DOUBLE, 0); + MPI::COMM_WORLD.Bcast(&cint, 1, MPI::INT, 0); + MPI::COMM_WORLD.Barrier(); + converged = cint; + } + MPI::Finalize(); + return 0; +} diff --git a/training/mpi_online_optimize.cc b/training/mpi_online_optimize.cc new file mode 100644 index 00000000..95b462bb --- /dev/null +++ b/training/mpi_online_optimize.cc @@ -0,0 +1,322 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "verbose.h" +#include "hg.h" +#include "prob.h" +#include "inside_outside.h" +#include "ff_register.h" +#include "decoder.h" +#include "filelib.h" +#include "online_optimizer.h" +#include "fdict.h" +#include "weights.h" +#include "sparse_vector.h" +#include "sampler.h" + +using namespace std; +using boost::shared_ptr; +namespace po = boost::program_options; + +void SanityCheck(const vector& w) { + for (int i = 0; i < w.size(); ++i) { + assert(!isnan(w[i])); + assert(!isinf(w[i])); + } +} + +struct FComp { + const vector& w_; + FComp(const vector& w) : w_(w) {} + bool operator()(int a, int b) const { + return fabs(w_[a]) > fabs(w_[b]); + } +}; + +void ShowLargestFeatures(const vector& w) { + vector fnums(w.size()); + for (int i = 0; i < w.size(); ++i) + fnums[i] = i; + vector::iterator mid = fnums.begin(); + mid += (w.size() > 10 ? 
diff --git a/training/mpi_online_optimize.cc b/training/mpi_online_optimize.cc
new file mode 100644
index 00000000..95b462bb
--- /dev/null
+++ b/training/mpi_online_optimize.cc
@@ -0,0 +1,322 @@
+#include <sstream>
+#include <iostream>
+#include <fstream>
+#include <vector>
+#include <cassert>
+#include <cmath>
+
+#include <boost/shared_ptr.hpp>
+#include <boost/program_options.hpp>
+#include <boost/program_options/variables_map.hpp>
+#include <mpi.h>
+
+#include "verbose.h"
+#include "hg.h"
+#include "prob.h"
+#include "inside_outside.h"
+#include "ff_register.h"
+#include "decoder.h"
+#include "filelib.h"
+#include "online_optimizer.h"
+#include "fdict.h"
+#include "weights.h"
+#include "sparse_vector.h"
+#include "sampler.h"
+
+using namespace std;
+using boost::shared_ptr;
+namespace po = boost::program_options;
+
+void SanityCheck(const vector<double>& w) {
+  for (int i = 0; i < w.size(); ++i) {
+    assert(!isnan(w[i]));
+    assert(!isinf(w[i]));
+  }
+}
+
+struct FComp {
+  const vector<double>& w_;
+  FComp(const vector<double>& w) : w_(w) {}
+  bool operator()(int a, int b) const {
+    return fabs(w_[a]) > fabs(w_[b]);
+  }
+};
+
+void ShowLargestFeatures(const vector<double>& w) {
+  vector<int> fnums(w.size());
+  for (int i = 0; i < w.size(); ++i)
+    fnums[i] = i;
+  vector<int>::iterator mid = fnums.begin();
+  mid += (w.size() > 10 ? 10 : w.size());
+  partial_sort(fnums.begin(), mid, fnums.end(), FComp(w));
+  cerr << "TOP FEATURES:";
+  for (vector<int>::iterator i = fnums.begin(); i != mid; ++i) {
+    cerr << ' ' << FD::Convert(*i) << '=' << w[*i];
+  }
+  cerr << endl;
+}
+
+void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
+  po::options_description opts("Configuration options");
+  opts.add_options()
+        ("input_weights,w",po::value<string>(),"Input feature weights file")
+        ("training_data,t",po::value<string>(),"Training data corpus")
+        ("decoder_config,c",po::value<string>(),"Decoder configuration file")
+        ("output_weights,o",po::value<string>()->default_value("-"),"Output feature weights file")
+        ("minibatch_size_per_proc,s", po::value<unsigned>()->default_value(5), "Number of training instances evaluated per processor in each minibatch")
+        ("freeze_feature_set,Z", "The feature set specified in the initial weights file is frozen throughout the duration of training")
+        ("optimization_method,m", po::value<string>()->default_value("sgd"), "Optimization method (sgd)")
+        ("eta_0,e", po::value<double>()->default_value(0.2), "Initial learning rate for SGD (eta_0)")
+        ("L1,1","Use L1 regularization")
+        ("gaussian_prior,g","Use a Gaussian prior on the weights")
+        ("sigma_squared", po::value<double>()->default_value(1.0), "Sigma squared term for spherical Gaussian prior");
+  po::options_description clo("Command line options");
+  clo.add_options()
+        ("config", po::value<string>(), "Configuration file")
+        ("help,h", "Print this help message and exit");
+  po::options_description dconfig_options, dcmdline_options;
+  dconfig_options.add(opts);
+  dcmdline_options.add(opts).add(clo);
+
+  po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
+  if (conf->count("config")) {
+    ifstream config((*conf)["config"].as<string>().c_str());
+    po::store(po::parse_config_file(config, dconfig_options), *conf);
+  }
+  po::notify(*conf);
+
+  if (conf->count("help") || !conf->count("input_weights") || !conf->count("training_data") || !conf->count("decoder_config")) {
+    cerr << dcmdline_options << endl;
+    MPI::Finalize();
+    exit(1);
+  }
+}
+
+void ReadTrainingCorpus(const string& fname, vector<string>* c) {
+  ReadFile rf(fname);
+  istream& in = *rf.stream();
+  string line;
+  while(in) {
+    getline(in, line);
+    if (!in) break;
+    c->push_back(line);
+  }
+}
+
+static const double kMINUS_EPSILON = -1e-6;
+
+struct TrainingObserver : public DecoderObserver {
+  void Reset() {
+    acc_grad.clear();
+    acc_obj = 0;
+    total_complete = 0;
+  }
+
+  void SetLocalGradientAndObjective(vector<double>* g, double* o) const {
+    *o = acc_obj;
+    for (SparseVector<prob_t>::const_iterator it = acc_grad.begin(); it != acc_grad.end(); ++it)
+      (*g)[it->first] = it->second;
+  }
+
+  virtual void NotifyDecodingStart(const SentenceMetadata& smeta) {
+    cur_model_exp.clear();
+    cur_obj = 0;
+    state = 1;
+  }
+
+  // compute model expectations, denominator of objective
+  virtual void NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) {
+    assert(state == 1);
+    state = 2;
+    const prob_t z = InsideOutside<prob_t, EdgeProb, SparseVector<prob_t>,
+                                   EdgeFeaturesAndProbWeightFunction>(*hg, &cur_model_exp);
+    cur_obj = log(z);
+    cur_model_exp /= z;
+  }
+
+  // compute "empirical" expectations, numerator of objective
+  virtual void NotifyAlignmentForest(const SentenceMetadata& smeta, Hypergraph* hg) {
+    assert(state == 2);
+    state = 3;
+    SparseVector<prob_t> ref_exp;
+    const prob_t ref_z = InsideOutside<prob_t, EdgeProb, SparseVector<prob_t>,
+                                       EdgeFeaturesAndProbWeightFunction>(*hg, &ref_exp);
+    ref_exp /= ref_z;
+
+    double log_ref_z;
+#if 0
+    if (crf_uniform_empirical) {
+      log_ref_z = ref_exp.dot(feature_weights);
+    } else {
+      log_ref_z = log(ref_z);
+    }
+#else
+    log_ref_z = log(ref_z);
+#endif
+
+    // rounding errors mean that <0 is too strict
+    if ((cur_obj - log_ref_z) < kMINUS_EPSILON) {
+      cerr << "DIFF. ERR! log_model_z < log_ref_z: " << cur_obj << " " << log_ref_z << endl;
+      exit(1);
+    }
+    assert(!isnan(log_ref_z));
+    ref_exp -= cur_model_exp;
+    acc_grad -= ref_exp;
+    acc_obj += (cur_obj - log_ref_z);
+  }
+
+  virtual void NotifyDecodingComplete(const SentenceMetadata& smeta) {
+    if (state == 3) {
+      ++total_complete;
+    } else {
+    }
+  }
+
+  int total_complete;
+  SparseVector<prob_t> cur_model_exp;
+  SparseVector<prob_t> acc_grad;
+  double acc_obj;
+  double cur_obj;
+  int state;
+};
+
+template <typename T>
+inline void Shuffle(vector<T>* c, MT19937* rng) {
+  unsigned size = c->size();
+  for (unsigned i = size - 1; i > 0; --i) {
+    const unsigned j = static_cast<unsigned>(rng->next() * i);
+    swap((*c)[j], (*c)[i]);
+  }
+}
+
+int main(int argc, char** argv) {
+  MPI::Init(argc, argv);
+  const int size = MPI::COMM_WORLD.Get_size();
+  const int rank = MPI::COMM_WORLD.Get_rank();
+  SetSilent(true);  // turn off verbose decoder output
+  cerr << "MPI: I am " << rank << '/' << size << endl;
+  register_feature_functions();
+  MT19937* rng = NULL;
+  if (rank == 0) rng = new MT19937;
+
+  po::variables_map conf;
+  InitCommandLine(argc, argv, &conf);
+
+  // load initial weights
+  Weights weights;
+  weights.InitFromFile(conf["input_weights"].as<string>());
+
+  // freeze feature set
+  const bool freeze_feature_set = conf.count("freeze_feature_set");
+  if (freeze_feature_set) FD::Freeze();
+
+  // load cdec.ini and set up decoder
+  ReadFile ini_rf(conf["decoder_config"].as<string>());
+  Decoder decoder(ini_rf.stream());
+  if (decoder.GetConf()["input"].as<string>() != "-") {
+    cerr << "cdec.ini must not set an input file\n";
+    MPI::COMM_WORLD.Abort(1);
+  }
+
+  vector<string> corpus;
+  ReadTrainingCorpus(conf["training_data"].as<string>(), &corpus);
+  assert(corpus.size() > 0);
+
+  std::tr1::shared_ptr<OnlineOptimizer> o;
+  std::tr1::shared_ptr<LearningRateSchedule> lr;
+  if (rank == 0) {
+    // TODO config
+    lr.reset(new ExponentialDecayLearningRate(corpus.size(), conf["eta_0"].as<double>()));
+
+    const string omethod = conf["optimization_method"].as<string>();
+    if (omethod == "sgd") {
+      const double C = 1.0;
+      o.reset(new CumulativeL1OnlineOptimizer(lr, corpus.size(), C));
+    } else {
+      assert(!"fail");
+    }
+  }
+  double objective = 0;
+  vector<double> lambdas;
+  weights.InitVector(&lambdas);
+  bool converged = false;
+
+  TrainingObserver observer;
+  while (!converged) {
+    observer.Reset();
+    if (rank == 0) {
+      cerr << "Starting decoding... (~" << corpus.size() << " sentences / proc)\n";
(~" << corpus.size() << " sentences / proc)\n"; + } + decoder.SetWeights(lambdas); +#if 0 + for (int i = 0; i < corpus.size(); ++i) + decoder.Decode(corpus[i], &observer); + + fill(gradient.begin(), gradient.end(), 0); + fill(rcv_grad.begin(), rcv_grad.end(), 0); + observer.SetLocalGradientAndObjective(&gradient, &objective); + + double to = 0; + MPI::COMM_WORLD.Reduce(const_cast(&gradient.data()[0]), &rcv_grad[0], num_feats, MPI::DOUBLE, MPI::SUM, 0); + MPI::COMM_WORLD.Reduce(&objective, &to, 1, MPI::DOUBLE, MPI::SUM, 0); + swap(gradient, rcv_grad); + objective = to; + + if (rank == 0) { // run optimizer only on rank=0 node + if (gaussian_prior) { + const double sigsq = conf["sigma_squared"].as(); + double norm = 0; + for (int k = 1; k < lambdas.size(); ++k) { + const double& lambda_k = lambdas[k]; + if (lambda_k) { + const double param = (lambda_k - means[k]); + norm += param * param; + gradient[k] += param / sigsq; + } + } + const double reg = norm / (2.0 * sigsq); + cerr << "REGULARIZATION TERM: " << reg << endl; + objective += reg; + } + cerr << "EVALUATION #" << o->EvaluationCount() << " OBJECTIVE: " << objective << endl; + double gnorm = 0; + for (int i = 0; i < gradient.size(); ++i) + gnorm += gradient[i] * gradient[i]; + cerr << " GNORM=" << sqrt(gnorm) << endl; + vector old = lambdas; + int c = 0; + while (old == lambdas) { + ++c; + if (c > 1) { cerr << "Same lambdas, repeating optimization\n"; } + o->Optimize(objective, gradient, &lambdas); + assert(c < 5); + } + old.clear(); + SanityCheck(lambdas); + ShowLargestFeatures(lambdas); + weights.InitFromVector(lambdas); + + converged = o->HasConverged(); + if (converged) { cerr << "OPTIMIZER REPORTS CONVERGENCE!\n"; } + + string fname = "weights.cur.gz"; + if (converged) { fname = "weights.final.gz"; } + ostringstream vv; + vv << "Objective = " << objective << " (eval count=" << o->EvaluationCount() << ")"; + const string svv = vv.str(); + weights.WriteToFile(fname, true, &svv); + } // rank == 0 + int cint = converged; + MPI::COMM_WORLD.Bcast(const_cast(&lambdas.data()[0]), num_feats, MPI::DOUBLE, 0); + MPI::COMM_WORLD.Bcast(&cint, 1, MPI::INT, 0); + MPI::COMM_WORLD.Barrier(); + converged = cint; +#endif + } + MPI::Finalize(); + return 0; +} diff --git a/training/optimize.cc b/training/optimize.cc index 1377caa6..f0740d5c 100644 --- a/training/optimize.cc +++ b/training/optimize.cc @@ -19,7 +19,6 @@ void BatchOptimizer::Save(ostream* out) const { void BatchOptimizer::Load(istream* in) { in->read((char*)&eval_, sizeof(eval_)); - ++eval_; in->read((char*)&has_converged_, sizeof(has_converged_)); LoadImpl(in); unsigned int magic = 0; // should be uint32_t diff --git a/training/optimize.h b/training/optimize.h index e2620f93..07943b44 100644 --- a/training/optimize.h +++ b/training/optimize.h @@ -22,6 +22,7 @@ class BatchOptimizer { const std::vector& g, std::vector* x) { assert(g.size() == x->size()); + ++eval_; OptimizeImpl(obj, g, x); scitbx::lbfgs::traditional_convergence_test converged(g.size()); has_converged_ = converged(&(*x)[0], &g[0]); -- cgit v1.2.3