// Batch CRF training for cdec: maximizes the conditional log-likelihood of
// a training corpus with L-BFGS or RProp, optionally under a Gaussian prior.
// When built with MPI support, the corpus is sharded across processes and
// per-shard gradients are reduced to rank 0, which runs the optimizer.
#include <sstream>
#include <iostream>
#include <fstream>
#include <vector>
#include <map>
#include <algorithm>
#include <functional>
#include <cassert>
#include <cmath>

#include "config.h"
#ifdef HAVE_MPI
#include <boost/mpi/timer.hpp>
#include <boost/mpi.hpp>
namespace mpi = boost::mpi;
#endif

#include <boost/shared_ptr.hpp>
#include <boost/program_options.hpp>
#include <boost/program_options/variables_map.hpp>

#include "verbose.h"
#include "hg.h"
#include "prob.h"
#include "inside_outside.h"
#include "ff_register.h"
#include "decoder.h"
#include "filelib.h"
#include "stringlib.h"
#include "optimize.h"
#include "fdict.h"
#include "weights.h"
#include "sparse_vector.h"

using namespace std;
using boost::shared_ptr;
namespace po = boost::program_options;

bool InitCommandLine(int argc, char** argv, po::variables_map* conf) {
  po::options_description opts("Configuration options");
  opts.add_options()
        ("input_weights,w", po::value<string>(), "Input feature weights file")
        ("training_data,t", po::value<string>(), "Training data")
        ("decoder_config,d", po::value<string>(), "Decoder configuration file")
        ("output_weights,o", po::value<string>()->default_value("-"), "Output feature weights file")
        ("optimization_method,m", po::value<string>()->default_value("lbfgs"), "Optimization method (sgd, lbfgs, rprop)")
        ("correction_buffers,M", po::value<int>()->default_value(10), "Number of gradients for LBFGS to maintain in memory")
        ("gaussian_prior,p", "Use a Gaussian prior on the weights")
        ("means,u", po::value<string>(), "File containing the means for Gaussian prior")
        ("per_sentence_grammar_scratch,P", po::value<string>(), "(Optional) location of scratch space to copy per-sentence grammars for fast access, useful if a RAM disk is available")
        ("sigma_squared", po::value<double>()->default_value(1.0), "Sigma squared term for spherical Gaussian prior");
  po::options_description clo("Command line options");
  clo.add_options()
        ("config", po::value<string>(), "Configuration file")
        ("help,h", "Print this help message and exit");
  po::options_description dconfig_options, dcmdline_options;
  dconfig_options.add(opts);
  dcmdline_options.add(opts).add(clo);

  po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
  if (conf->count("config")) {
    ifstream config((*conf)["config"].as<string>().c_str());
    po::store(po::parse_config_file(config, dconfig_options), *conf);
  }
  po::notify(*conf);

  if (conf->count("help") || !conf->count("input_weights") || !conf->count("training_data") || !conf->count("decoder_config")) {
    cerr << dcmdline_options << endl;
    return false;
  }
  return true;
}

// Distributes the corpus across MPI processes: process `rank` keeps every
// `size`-th line, starting at line `rank`.
void ReadTrainingCorpus(const string& fname, int rank, int size, vector<string>* c) {
  ReadFile rf(fname);
  istream& in = *rf.stream();
  string line;
  int lc = 0;
  while (in) {
    getline(in, line);
    if (!in) break;
    if (lc % size == rank) c->push_back(line);
    ++lc;
  }
}

static const double kMINUS_EPSILON = -1e-6;

struct TrainingObserver : public DecoderObserver {
  void Reset() {
    acc_grad.clear();
    acc_obj = 0;
    total_complete = 0;
  }

  void SetLocalGradientAndObjective(vector<double>* g, double* o) const {
    *o = acc_obj;
    for (SparseVector<prob_t>::const_iterator it = acc_grad.begin(); it != acc_grad.end(); ++it)
      (*g)[it->first] = it->second.as_float();
  }

  virtual void NotifyDecodingStart(const SentenceMetadata& smeta) {
    cur_model_exp.clear();
    cur_obj = 0;
    state = 1;
  }

  // compute model expectations, denominator of objective
  virtual void NotifyTranslationForest(const SentenceMetadata& smeta, Hypergraph* hg) {
    assert(state == 1);
    state = 2;
    const prob_t z = InsideOutside<prob_t,
                                   EdgeProb,
                                   SparseVector<prob_t>,
                                   EdgeFeaturesAndProbWeightFunction>(*hg, &cur_model_exp);
    cur_obj = log(z);
    cur_model_exp /= z;
  }

  // compute "empirical" expectations, numerator of objective
  virtual void NotifyAlignmentForest(const SentenceMetadata& smeta, Hypergraph* hg) {
    assert(state == 2);
    state = 3;
    SparseVector<prob_t> ref_exp;
    const prob_t ref_z = InsideOutside<prob_t,
                                       EdgeProb,
                                       SparseVector<prob_t>,
                                       EdgeFeaturesAndProbWeightFunction>(*hg, &ref_exp);
    ref_exp /= ref_z;

    double log_ref_z;
#if 0
    if (crf_uniform_empirical) {
      log_ref_z = ref_exp.dot(feature_weights);
    } else {
      log_ref_z = log(ref_z);
    }
#else
    log_ref_z = log(ref_z);
#endif

    // rounding errors mean that requiring strictly >= 0 is too strict
    if ((cur_obj - log_ref_z) < kMINUS_EPSILON) {
      cerr << "DIFF. ERR! log_model_z < log_ref_z: " << cur_obj << " " << log_ref_z << endl;
      exit(1);
    }
    assert(!isnan(log_ref_z));
    ref_exp -= cur_model_exp;
    acc_grad -= ref_exp;
    acc_obj += (cur_obj - log_ref_z);
  }

  virtual void NotifyDecodingComplete(const SentenceMetadata& smeta) {
    if (state == 3) {
      ++total_complete;
    }
  }

  int total_complete;
  SparseVector<prob_t> cur_model_exp;
  SparseVector<prob_t> acc_grad;
  double acc_obj;
  double cur_obj;
  int state;
};
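// A sketch of the math TrainingObserver implements, assuming the usual
// conditional log-linear (CRF) model over derivations d of a hypergraph:
//
//   -log p(y | x) = log Z(x) - log Z_ref(x, y)
//
// where Z(x) is the inside score of the full translation forest and
// Z_ref(x, y) is the inside score of the reference-constrained alignment
// forest. The gradient of this per-sentence loss is the difference of
// feature expectations,
//
//   d/dw [-log p(y | x)] = E_{p(d|x)}[f(d)] - E_{p(d|x,y)}[f(d)],
//
// which is what the updates to acc_obj and acc_grad above accumulate.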
void ReadConfig(const string& ini, vector<string>* out) {
  ReadFile rf(ini);
  istream& in = *rf.stream();
  while (in) {
    string line;
    getline(in, line);
    if (!in) continue;
    out->push_back(line);
  }
}

void StoreConfig(const vector<string>& cfg, istringstream* o) {
  ostringstream os;
  for (unsigned i = 0; i < cfg.size(); ++i) { os << cfg[i] << endl; }
  o->str(os.str());
}

template <typename T>
struct VectorPlus : public binary_function<vector<T>, vector<T>, vector<T> > {
  vector<T> operator()(const vector<T>& a, const vector<T>& b) const {
    assert(a.size() == b.size());
    vector<T> v(a.size());
    transform(a.begin(), a.end(), b.begin(), v.begin(), plus<T>());
    return v;
  }
};

// Copies each sentence's per-sentence grammar into fast scratch space
// (e.g., a RAM disk) and rewrites the <seg> tag to point at the copy.
void MovePerSentenceGrammars(const string& root, int rank, int size, vector<string>* c) {
  if (!DirectoryExists(root)) {
    cerr << "Can't find scratch space at " << root << endl;
    abort();
  }
  ostringstream os;
  os << root << "/psg." << rank << "_of_" << size;
  const string path = os.str();
  MkDirP(path);
  string sent;
  map<string, string> attr;
  for (unsigned i = 0; i < c->size(); ++i) {
    sent = (*c)[i];
    attr.clear();
    ProcessAndStripSGML(&sent, &attr);
    map<string, string>::iterator it = attr.find("grammar");
    if (it != attr.end()) {
      string src_file = it->second;
      bool is_gzipped = (src_file.size() > 3) &&
          (src_file.rfind(".gz") == (src_file.size() - 3));
      string new_name = path + "/" + md5(sent);
      if (is_gzipped) new_name += ".gz";
      CopyFile(src_file, new_name);
      it->second = new_name;
    }
    ostringstream ns;
    ns << SGMLOpenSegTag(attr) << ' ' << sent << " </seg>";
    (*c)[i] = ns.str();
  }
}

int main(int argc, char** argv) {
#ifdef HAVE_MPI
  mpi::environment env(argc, argv);
  mpi::communicator world;
  const int size = world.size();
  const int rank = world.rank();
#else
  const int size = 1;
  const int rank = 0;
#endif
  SetSilent(true);  // turn off verbose decoder output
  register_feature_functions();

  po::variables_map conf;
  if (!InitCommandLine(argc, argv, &conf)) return 1;

  // load cdec.ini and set up decoder
  vector<string> cdec_ini;
  ReadConfig(conf["decoder_config"].as<string>(), &cdec_ini);
  istringstream ini;
  StoreConfig(cdec_ini, &ini);
  if (rank == 0) cerr << "Loading grammar...\n";
  Decoder* decoder = new Decoder(&ini);
  if (decoder->GetConf()["input"].as<string>() != "-") {
    cerr << "cdec.ini must not set an input file\n";
    return 1;
  }
  if (rank == 0) cerr << "Done loading grammar!\n";

  // load initial weights
  if (rank == 0) { cerr << "Loading weights...\n"; }
  vector<weight_t>& lambdas = decoder->CurrentWeightVector();
  Weights::InitFromFile(conf["input_weights"].as<string>(), &lambdas);
  if (rank == 0) { cerr << "Done loading weights.\n"; }

  // freeze feature set (should this be optional?)
  const bool freeze_feature_set = true;
  if (freeze_feature_set) FD::Freeze();
  const int num_feats = FD::NumFeats();
  if (rank == 0) cerr << "Number of features: " << num_feats << endl;
  lambdas.resize(num_feats);
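  // A note on the math of the regularizer applied in the training loop
  // below, assuming the spherical Gaussian prior selected by
  // --gaussian_prior with means u (zero unless --means is given) and
  // variance sigma^2: the penalty
  //
  //   ||w - u||^2 / (2 * sigma^2)
  //
  // is added to the negative log-likelihood objective, and each feature k
  // contributes (w_k - u_k) / sigma^2 to the gradient, which is exactly
  // what the gaussian_prior block inside the optimization loop computes.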
  const bool gaussian_prior = conf.count("gaussian_prior");
  vector<weight_t> means(num_feats, 0);
  if (conf.count("means")) {
    if (!gaussian_prior) {
      cerr << "Don't use --means without --gaussian_prior!\n";
      exit(1);
    }
    Weights::InitFromFile(conf["means"].as<string>(), &means);
  }
  shared_ptr<BatchOptimizer> o;
  if (rank == 0) {
    const string omethod = conf["optimization_method"].as<string>();
    if (omethod == "rprop")
      o.reset(new RPropOptimizer(num_feats));  // TODO add configuration
    else
      o.reset(new LBFGSOptimizer(num_feats, conf["correction_buffers"].as<int>()));
    cerr << "Optimizer: " << o->Name() << endl;
  }
  double objective = 0;
  vector<double> gradient(num_feats, 0.0);
  vector<double> rcv_grad;
  rcv_grad.clear();
  bool converged = false;

  vector<string> corpus;
  ReadTrainingCorpus(conf["training_data"].as<string>(), rank, size, &corpus);
  assert(corpus.size() > 0);

  if (conf.count("per_sentence_grammar_scratch"))
    MovePerSentenceGrammars(conf["per_sentence_grammar_scratch"].as<string>(), rank, size, &corpus);

  TrainingObserver observer;
  while (!converged) {
    observer.Reset();
#ifdef HAVE_MPI
    mpi::timer timer;
    world.barrier();
#endif
    if (rank == 0) {
      cerr << "Starting decoding... (~" << corpus.size() << " sentences / proc)\n";
    }
    for (int i = 0; i < corpus.size(); ++i)
      decoder->Decode(corpus[i], &observer);
    cerr << "  process " << rank << '/' << size << " done\n";
    fill(gradient.begin(), gradient.end(), 0);
    observer.SetLocalGradientAndObjective(&gradient, &objective);

    double to = 0;
#ifdef HAVE_MPI
    // sum the per-process gradients and objectives on rank 0
    rcv_grad.resize(num_feats, 0.0);
    mpi::reduce(world, &gradient[0], gradient.size(), &rcv_grad[0], plus<double>(), 0);
    swap(gradient, rcv_grad);
    rcv_grad.clear();

    mpi::reduce(world, objective, to, plus<double>(), 0);
    objective = to;
#endif

    if (rank == 0) {  // run optimizer only on rank=0 node
      if (gaussian_prior) {
        const double sigsq = conf["sigma_squared"].as<double>();
        double norm = 0;
        for (int k = 1; k < lambdas.size(); ++k) {
          const double& lambda_k = lambdas[k];
          if (lambda_k) {
            const double param = (lambda_k - means[k]);
            norm += param * param;
            gradient[k] += param / sigsq;
          }
        }
        const double reg = norm / (2.0 * sigsq);
        cerr << "REGULARIZATION TERM: " << reg << endl;
        objective += reg;
      }
      cerr << "EVALUATION #" << o->EvaluationCount() << " OBJECTIVE: " << objective << endl;
      double gnorm = 0;
      for (int i = 0; i < gradient.size(); ++i)
        gnorm += gradient[i] * gradient[i];
      cerr << "  GNORM=" << sqrt(gnorm) << endl;
      vector<weight_t> old = lambdas;
      int c = 0;
      while (old == lambdas) {
        ++c;
        if (c > 1) { cerr << "Same lambdas, repeating optimization\n"; }
        o->Optimize(objective, gradient, &lambdas);
        assert(c < 5);
      }
      old.clear();
      Weights::SanityCheck(lambdas);
      Weights::ShowLargestFeatures(lambdas);

      converged = o->HasConverged();
      if (converged) { cerr << "OPTIMIZER REPORTS CONVERGENCE!\n"; }

      string fname = "weights.cur.gz";
      if (converged) { fname = "weights.final.gz"; }
      ostringstream vv;
      vv << "Objective = " << objective << "  (eval count=" << o->EvaluationCount() << ")";
      const string svv = vv.str();
      Weights::WriteToFile(fname, lambdas, true, &svv);
    }  // rank == 0
    int cint = converged;
#ifdef HAVE_MPI
    // share the updated weights and convergence flag with all processes
    mpi::broadcast(world, &lambdas[0], lambdas.size(), 0);
    mpi::broadcast(world, cint, 0);
    if (rank == 0) { cerr << "  ELAPSED TIME THIS ITERATION=" << timer.elapsed() << endl; }
#endif
    converged = cint;
  }
  return 0;
}
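// Example invocation (paths and binary name are hypothetical; the flags are
// the ones defined in InitCommandLine above). With MPI, each of the N
// processes decodes a 1/N shard of the corpus; rank 0 writes weights.cur.gz
// after each iteration and weights.final.gz at convergence:
//
//   mpirun -np 8 ./mpi_batch_optimize \
//       -d cdec.ini -w weights.init -t training.corpus \
//       -m lbfgs -M 10 --gaussian_prior --sigma_squared 1.0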