From a769e4964db2443ce165043095e18dfc0d788910 Mon Sep 17 00:00:00 2001
From: Chris Dyer
Date: Mon, 31 Oct 2011 14:03:22 -0400
Subject: lbfgs + time-series minibatch optimization

---
 training/lbfgs_test.cc        |   7 +-
 training/mpi_flex_optimize.cc | 145 +++++++++++++++++++++++++++---------------
 training/optimize.cc          |   3 +-
 word-aligner/aligner.pl       |   8 ++-
 word-aligner/ortho-norm/mg.pl |  13 ++++
 word-aligner/ortho-norm/rw.pl |  13 ++++
 word-aligner/stemmers/mg.pl   |  39 ++++++++++++
 word-aligner/stemmers/rw.pl   |  38 +++++++++++
 8 files changed, 208 insertions(+), 58 deletions(-)
 create mode 100755 word-aligner/ortho-norm/mg.pl
 create mode 100755 word-aligner/ortho-norm/rw.pl
 create mode 100755 word-aligner/stemmers/mg.pl
 create mode 100755 word-aligner/stemmers/rw.pl

diff --git a/training/lbfgs_test.cc b/training/lbfgs_test.cc
index fc21e98d..c94682e9 100644
--- a/training/lbfgs_test.cc
+++ b/training/lbfgs_test.cc
@@ -28,11 +28,14 @@ double TestOptimizer() {
     g[2] = 2 * x[2] + 6;
     obj = 4 * x[0]*x[0] + x[0] * x[1] + x[1]*x[1] + x[2]*x[2] + 6 * x[2] + 5;
     opt.run(x, obj, g);
-
+    if (!opt.requests_f_and_g()) {
+      if (converged(x,g)) break;
+      opt.run(x, obj, g);
+    }
     cerr << x[0] << " " << x[1] << " " << x[2] << endl;
     cerr << " obj=" << obj << "\td/dx1=" << g[0] << " d/dx2=" << g[1] << " d/dx3=" << g[2] << endl;
     cerr << opt << endl;
-  } while (!converged(x, g));
+  } while (true);
   return obj;
 }
diff --git a/training/mpi_flex_optimize.cc b/training/mpi_flex_optimize.cc
index 87c5f331..00746532 100644
--- a/training/mpi_flex_optimize.cc
+++ b/training/mpi_flex_optimize.cc
@@ -39,15 +39,12 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) {
         ("weights,w",po::value<string>(),"Initial feature weights")
         ("training_data,d",po::value<string>(),"Training data")
         ("minibatch_size_per_proc,s", po::value<unsigned>()->default_value(6), "Number of training instances evaluated per processor in each minibatch")
-        ("optimization_method,m", po::value<string>()->default_value("lbfgs"), "Optimization method (options: lbfgs, sgd, rprop)")
-        ("minibatch_iterations,i", po::value<unsigned>()->default_value(10), "Number of optimization iterations per minibatch (1 = standard SGD)")
+        ("minibatch_iterations,i", po::value<unsigned>()->default_value(10), "Number of optimization iterations per minibatch")
         ("iterations,I", po::value<unsigned>()->default_value(50), "Number of passes through the training data before termination")
+        ("regularization_strength,C", po::value<double>()->default_value(0.2), "Regularization strength")
+        ("time_series_strength,T", po::value<double>()->default_value(0.0), "Time series regularization strength")
         ("random_seed,S", po::value<uint32_t>(), "Random seed (if not specified, /dev/random will be used)")
-        ("lbfgs_memory_buffers,M", po::value<unsigned>()->default_value(10), "Number of memory buffers for LBFGS history")
-        ("eta_0,e", po::value<double>()->default_value(0.1), "Initial learning rate for SGD")
-        ("L1,1","Use L1 regularization")
-        ("L2,2","Use L2 regularization")
-        ("regularization_strength,C", po::value<double>()->default_value(1.0), "Regularization strength (C)");
+        ("lbfgs_memory_buffers,M", po::value<unsigned>()->default_value(10), "Number of memory buffers for LBFGS history");
   po::options_description clo("Command line options");
   clo.add_options()
         ("config", po::value<string>(), "Configuration file")
@@ -64,7 +61,7 @@ bool InitCommandLine(int argc, char** argv, po::variables_map* conf) {
   po::notify(*conf);

   if (conf->count("help") || !conf->count("training_data") || !conf->count("cdec_config")) {
-    cerr << "General-purpose minibatch online optimizer (MPI support "
+    cerr << "LBFGS minibatch online optimizer (MPI support "
 #if HAVE_MPI
          << "enabled"
 #else
@@ -166,6 +163,38 @@ void AddGrad(const SparseVector<prob_t> x, double s, SparseVector<double>* acc) {
     acc->add_value(it->first, it->second.as_float() * s);
 }

+double PNorm(const vector<double>& v, const double p) {
+  double acc = 0;
+  for (int i = 0; i < v.size(); ++i)
+    acc += pow(v[i], p);
+  return pow(acc, 1.0 / p);
+}
+
+void VV(ostream& os, const vector<double>& v) {
+  for (int i = 1; i < v.size(); ++i)
+    if (v[i]) os << FD::Convert(i) << "=" << v[i] << " ";
+}
+
+double ApplyRegularizationTerms(const double C,
+                                const double T,
+                                const vector<double>& weights,
+                                const vector<double>& prev_weights,
+                                vector<double>* g) {
+  assert(weights.size() == g->size());
+  double reg = 0;
+  for (size_t i = 0; i < weights.size(); ++i) {
+    const double prev_w_i = (i < prev_weights.size() ? prev_weights[i] : 0.0);
+    const double& w_i = weights[i];
+    double& g_i = (*g)[i];
+    reg += C * w_i * w_i;
+    g_i += 2 * C * w_i;
+
+    reg += T * (w_i - prev_w_i) * (w_i - prev_w_i);
+    g_i += 2 * T * (w_i - prev_w_i);
+  }
+  return reg;
+}
+
 int main(int argc, char** argv) {
 #ifdef HAVE_MPI
   mpi::environment env(argc, argv);
@@ -176,7 +205,7 @@ int main(int argc, char** argv) {
   const int size = 1;
   const int rank = 0;
 #endif
-  if (size > 1) SetSilent(true);  // turn off verbose decoder output
+  if (size > 0) SetSilent(true);  // turn off verbose decoder output
   register_feature_functions();

   MT19937* rng = NULL;
@@ -186,56 +215,60 @@ int main(int argc, char** argv) {
   boost::shared_ptr<BatchOptimizer> o;
   const unsigned lbfgs_memory_buffers = conf["lbfgs_memory_buffers"].as<unsigned>();
-
-  istringstream ins;
-  ReadConfig(conf["cdec_config"].as<string>(), &ins);
-  Decoder decoder(&ins);
-
-  // load initial weights
-  vector<weight_t> init_weights;
-  if (conf.count("weights"))
-    Weights::InitFromFile(conf["weights"].as<string>(), &init_weights);
+  const unsigned size_per_proc = conf["minibatch_size_per_proc"].as<unsigned>();
+  const unsigned minibatch_iterations = conf["minibatch_iterations"].as<unsigned>();
+  const double regularization_strength = conf["regularization_strength"].as<double>();
+  const double time_series_strength = conf["time_series_strength"].as<double>();
+  const bool use_time_series_reg = time_series_strength > 0.0;
+  const unsigned max_iteration = conf["iterations"].as<unsigned>();

   vector<string> corpus;
   vector<int> ids;
   ReadTrainingCorpus(conf["training_data"].as<string>(), rank, size, &corpus, &ids);
   assert(corpus.size() > 0);

-  const unsigned size_per_proc = conf["minibatch_size_per_proc"].as<unsigned>();
   if (size_per_proc > corpus.size()) {
-    cerr << "Minibatch size must be smaller than corpus size!\n";
+    cerr << "Minibatch size (per processor) must be smaller than or equal to the local corpus size!\n";
     return 1;
   }

-  size_t total_corpus_size = 0;
-#ifdef HAVE_MPI
-  reduce(world, corpus.size(), total_corpus_size, std::plus<size_t>(), 0);
-#else
-  total_corpus_size = corpus.size();
-#endif
+  // initialize decoder (loads hash functions if necessary)
+  istringstream ins;
+  ReadConfig(conf["cdec_config"].as<string>(), &ins);
+  Decoder decoder(&ins);
+
+  // load initial weights
+  vector<weight_t> prev_weights;
+  if (conf.count("weights"))
+    Weights::InitFromFile(conf["weights"].as<string>(), &prev_weights);

   if (conf.count("random_seed"))
     rng = new MT19937(conf["random_seed"].as<uint32_t>());
   else
     rng = new MT19937;

-  const unsigned minibatch_iterations = conf["minibatch_iterations"].as<unsigned>();
+  size_t total_corpus_size = 0;
+#ifdef HAVE_MPI
+  reduce(world, corpus.size(), total_corpus_size, std::plus<size_t>(), 0);
+#else
+  total_corpus_size = corpus.size();
+#endif

-  if (rank == 0) {
+  if (rank == 0)
     cerr << "Total corpus size: " << total_corpus_size << endl;
-    const unsigned batch_size = size_per_proc * size;
-  }

-  SparseVector<double> x;
-  Weights::InitSparseVector(init_weights, &x);
   CopyHGsObserver observer;

   int write_weights_every_ith = 100; // TODO configure
   int titer = -1;

-  vector<weight_t>& lambdas = decoder.CurrentWeightVector();
-  lambdas.swap(init_weights);
-  init_weights.clear();
+  vector<weight_t>& cur_weights = decoder.CurrentWeightVector();
+  if (use_time_series_reg) {
+    cur_weights = prev_weights;
+  } else {
+    cur_weights.swap(prev_weights);
+    prev_weights.clear();
+  }

   int iter = -1;
   bool converged = false;
@@ -243,26 +276,20 @@ int main(int argc, char** argv) {
 #ifdef HAVE_MPI
     mpi::timer timer;
 #endif
-    x.init_vector(&lambdas);
     ++iter; ++titer;
-#if 0
     if (rank == 0) {
       converged = (iter == max_iteration);
-      Weights::SanityCheck(lambdas);
-      Weights::ShowLargestFeatures(lambdas);
       string fname = "weights.cur.gz";
       if (iter % write_weights_every_ith == 0) {
-        ostringstream o; o << "weights.epoch_" << (ai+1) << '.' << iter << ".gz";
+        ostringstream o; o << "weights.epoch_" << iter << ".gz";
         fname = o.str();
       }
-      if (converged && ((ai+1)==agenda.size())) { fname = "weights.final.gz"; }
+      if (converged) { fname = "weights.final.gz"; }
       ostringstream vv;
-      vv << "total iter=" << titer << " (of current config iter=" << iter << ") minibatch=" << size_per_proc << " sentences/proc x " << size << " procs. num_feats=" << x.size() << '/' << FD::NumFeats() << " passes_thru_data=" << (titer * size_per_proc / static_cast<double>(corpus.size())) << " eta=" << lr->eta(titer);
+      vv << "total iter=" << titer << " (of current config iter=" << iter << ") minibatch=" << size_per_proc << " sentences/proc x " << size << " procs. num_feats=" << FD::NumFeats() << " passes_thru_data=" << (titer * size_per_proc / static_cast<double>(corpus.size()));
       const string svv = vv.str();
-      cerr << svv << endl;
-      Weights::WriteToFile(fname, lambdas, true, &svv);
+      Weights::WriteToFile(fname, cur_weights, true, &svv);
     }
-#endif

     vector<Hypergraph> hgs(size_per_proc);
     vector<Hypergraph> gold_hgs(size_per_proc);
@@ -287,8 +314,8 @@ int main(int argc, char** argv) {
       Hypergraph& hg_gold = gold_hgs[i];
       if (hg.edges_.size() < 2) continue;

-      hg.Reweight(lambdas);
-      hg_gold.Reweight(lambdas);
+      hg.Reweight(cur_weights);
+      hg_gold.Reweight(cur_weights);
       SparseVector<prob_t> model_exp, gold_exp;
       const prob_t z = InsideOutside<prob_t,
@@ ... @@ int main(int argc, char** argv) {
         vector<double> gg(FD::NumFeats());
-        if (gg.size() != lambdas.size()) { lambdas.resize(gg.size()); }
+        if (gg.size() != cur_weights.size()) { cur_weights.resize(gg.size()); }
         for (SparseVector<double>::const_iterator it = g.begin(); it != g.end(); ++it)
           if (it->first) { gg[it->first] = it->second; }
-        cerr << "OBJ: " << obj << endl;
-        o->Optimize(obj, gg, &lambdas);
+        g.clear();
+        double r = ApplyRegularizationTerms(regularization_strength,
+                                            time_series_strength * (iter == 0 ? 0.0 : 1.0),
+                                            cur_weights,
+                                            prev_weights,
+                                            &gg);
+        obj += r;
+        if (mi == 0 || mi == (minibatch_iterations - 1)) {
+          if (!mi) cerr << iter << ' '; else cerr << ' ';
+          cerr << "OBJ=" << obj << " (REG=" << r << ")" << " |g|=" << PNorm(gg, 2) << " |w|=" << PNorm(cur_weights, 2);
+          if (mi > 0) cerr << endl << flush; else cerr << ' ';
+        } else { cerr << '.' << flush; }
+        // cerr << "w = "; VV(cerr, cur_weights); cerr << endl;
+        // cerr << "g = "; VV(cerr, gg); cerr << endl;
+        o->Optimize(obj, gg, &cur_weights);
       }
 #ifdef HAVE_MPI
-      broadcast(world, x, 0);
+      // broadcast(world, x, 0);
       broadcast(world, converged, 0);
       world.barrier();
       if (rank == 0) { cerr << " ELAPSED TIME THIS ITERATION=" << timer.elapsed() << endl; }
 #endif
     }
+    prev_weights = cur_weights;
   }
   return 0;
 }
diff --git a/training/optimize.cc b/training/optimize.cc
index f0740d5c..41ac90d8 100644
--- a/training/optimize.cc
+++ b/training/optimize.cc
@@ -96,6 +96,7 @@ void LBFGSOptimizer::OptimizeImpl(const double& obj,
                                   const vector<double>& g,
                                   vector<double>* x) {
   opt_.run(&(*x)[0], obj, &g[0]);
-  cerr << opt_ << endl;
+  if (!opt_.requests_f_and_g()) opt_.run(&(*x)[0], obj, &g[0]);
+  // cerr << opt_ << endl;
 }
diff --git a/word-aligner/aligner.pl b/word-aligner/aligner.pl
index 3a385a88..c5078645 100755
--- a/word-aligner/aligner.pl
+++ b/word-aligner/aligner.pl
@@ -27,11 +27,13 @@ die "Expected format corpus.l1-l2 where l1 & l2 are two-letter abbreviations\nfo
 my $f_lang = $1;
 my $e_lang = $2;

+print STDERR " Using mkcls in: $mkcls\n\n";
 print STDERR "Source language: $f_lang\n";
 print STDERR "Target language: $e_lang\n";
-print STDERR " Using mkcls in: $mkcls\n\n";

-die "Don't have an orthographic normalizer for $f_lang\n" unless -f "$SCRIPT_DIR/ortho-norm/$f_lang.pl";
-die "Don't have an orthographic normalizer for $e_lang\n" unless -f "$SCRIPT_DIR/ortho-norm/$e_lang.pl";
+die "Don't have a stemmer for $f_lang: please create $SCRIPT_DIR/stemmers/$f_lang.pl\n" unless -f "$SCRIPT_DIR/stemmers/$f_lang.pl";
+die "Don't have a stemmer for $e_lang: please create $SCRIPT_DIR/stemmers/$e_lang.pl\n" unless -f "$SCRIPT_DIR/stemmers/$e_lang.pl";
+die "Don't have an orthographic normalizer for $f_lang: please create $SCRIPT_DIR/ortho-norm/$f_lang.pl\n" unless -f "$SCRIPT_DIR/ortho-norm/$f_lang.pl";
+die "Don't have an orthographic normalizer for $e_lang: please create $SCRIPT_DIR/ortho-norm/$e_lang.pl\n" unless -f "$SCRIPT_DIR/ortho-norm/$e_lang.pl";

 my @directions = qw(f-e);
diff --git a/word-aligner/ortho-norm/mg.pl b/word-aligner/ortho-norm/mg.pl
new file mode 100755
index 00000000..4cb0e8e7
--- /dev/null
+++ b/word-aligner/ortho-norm/mg.pl
@@ -0,0 +1,13 @@
+#!/usr/bin/perl -w
+use strict;
+use utf8;
+
+binmode(STDIN, ":utf8");
+binmode(STDOUT, ":utf8");
+
+while(<STDIN>) {
+  $_ = lc $_;
+  s/([a-z])'( |$)/$1$2/g;
+  print;
+}
+
diff --git a/word-aligner/ortho-norm/rw.pl b/word-aligner/ortho-norm/rw.pl
new file mode 100755
index 00000000..4cb0e8e7
--- /dev/null
+++ b/word-aligner/ortho-norm/rw.pl
@@ -0,0 +1,13 @@
+#!/usr/bin/perl -w
+use strict;
+use utf8;
+
+binmode(STDIN, ":utf8");
+binmode(STDOUT, ":utf8");
+
+while(<STDIN>) {
+  $_ = lc $_;
+  s/([a-z])'( |$)/$1$2/g;
+  print;
+}
+
diff --git a/word-aligner/stemmers/mg.pl b/word-aligner/stemmers/mg.pl
new file mode 100755
index 00000000..2f79a94e
--- /dev/null
+++ b/word-aligner/stemmers/mg.pl
@@ -0,0 +1,39 @@
+#!/usr/bin/perl -w
+
+use strict;
+use utf8;
+
+binmode(STDIN, ":utf8");
+binmode(STDOUT,":utf8");
+
+my $vocab = undef;
+if (scalar @ARGV > 0) {
+  die "Only allow --vocab" unless ($ARGV[0] eq '--vocab' && scalar @ARGV == 1);
+  $vocab = 1;
+}
+
+my %dict;
+while(<STDIN>) {
+  chomp;
+  my @words = split /\s+/;
+  my @out = ();
+  for my $w (@words) {
+    my $tw = $dict{$w};
+    if (!defined $tw) {
+      my $el = 5;
+      if ($w =~ /(ndz|ndr|nts|ntr)/) { $el++; }
+      if ($w =~ /^(mp|mb|nd)/) { $el++; }
+      if ($el > length($w)) { $el = length($w); }
+      $tw = substr $w, 0, $el;
+      $dict{$w} = $tw;
+    }
+    push @out, $tw;
+  }
+  if ($vocab) {
+    die "Expected exactly one word per line with --vocab: $_" unless scalar @out == 1;
+    print "$_ @out\n";
+  } else {
+    print "@out\n";
+  }
+}
+
diff --git a/word-aligner/stemmers/rw.pl b/word-aligner/stemmers/rw.pl
new file mode 100755
index 00000000..6d873b40
--- /dev/null
+++ b/word-aligner/stemmers/rw.pl
@@ -0,0 +1,38 @@
+#!/usr/bin/perl -w
+
+use strict;
+use utf8;
+
+binmode(STDIN, ":utf8");
+binmode(STDOUT,":utf8");
+
+my $vocab = undef;
+if (scalar @ARGV > 0) {
+  die "Only allow --vocab" unless ($ARGV[0] eq '--vocab' && scalar @ARGV == 1);
+  $vocab = 1;
+}
+
+my %dict;
+while(<STDIN>) {
+  chomp;
+  my @words = split /\s+/;
+  my @out = ();
+  for my $w (@words) {
+    my $tw = $dict{$w};
+    if (!defined $tw) {
+      my $el = 5;
+      if ($w =~ /(ny|jy|nk|nt|sh|cy)/) { $el++; }
+      if ($el > length($w)) { $el = length($w); }
+      $tw = substr $w, 0, $el;
+      $dict{$w} = $tw;
+    }
+    push @out, $tw;
+  }
+  if ($vocab) {
+    die "Expected exactly one word per line with --vocab: $_" unless scalar @out == 1;
+    print "$_ @out\n";
+  } else {
+    print "@out\n";
+  }
+}
+
--
cgit v1.2.3
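
Note on the scheme this patch implements: each minibatch is optimized for several L-BFGS iterations against the minibatch log-loss plus an L2 term C*||w||^2 and a time-series term T*||w - w_prev||^2 that anchors the current minibatch's weights to the previous minibatch's solution (ApplyRegularizationTerms above, with T switched off on the first minibatch). The toy program below is a minimal self-contained sketch of that loop, not code from the patch: plain gradient steps stand in for the repository's LBFGSOptimizer, and the quadratic per-batch loss, the constants, and the names ToyLossAndGrad and Regularize are illustrative assumptions.

#include <cstdio>
#include <vector>
using namespace std;

// Toy per-minibatch loss: squared distance to a batch-specific target vector.
double ToyLossAndGrad(const vector<double>& w, const vector<double>& target,
                      vector<double>* g) {
  double loss = 0;
  for (size_t i = 0; i < w.size(); ++i) {
    const double d = w[i] - target[i];
    loss += d * d;
    (*g)[i] += 2 * d;
  }
  return loss;
}

// Same shape as ApplyRegularizationTerms in the patch: an L2 penalty plus a
// penalty tying the current weights to the previous minibatch's solution.
double Regularize(double C, double T, const vector<double>& w,
                  const vector<double>& prev, vector<double>* g) {
  double reg = 0;
  for (size_t i = 0; i < w.size(); ++i) {
    reg += C * w[i] * w[i];
    (*g)[i] += 2 * C * w[i];
    const double d = w[i] - prev[i];
    reg += T * d * d;
    (*g)[i] += 2 * T * d;
  }
  return reg;
}

int main() {
  const double C = 0.2, T = 1.0, eta = 0.05;  // strengths and GD step size
  vector<double> w(2, 0.0), prev_w(2, 0.0);
  // Two "minibatches" whose individual optima disagree (+1 vs. -1).
  vector<vector<double> > batches;
  batches.push_back(vector<double>(2, 1.0));
  batches.push_back(vector<double>(2, -1.0));
  bool first = true;
  for (int pass = 0; pass < 2; ++pass) {
    for (size_t b = 0; b < batches.size(); ++b) {
      for (int mi = 0; mi < 25; ++mi) {  // plays the role of minibatch_iterations
        vector<double> g(w.size(), 0.0);
        double obj = ToyLossAndGrad(w, batches[b], &g);
        // As in the patch, the time-series term is disabled for the very
        // first minibatch, when there is no previous weight vector yet.
        obj += Regularize(C, first ? 0.0 : T, w, prev_w, &g);
        for (size_t i = 0; i < w.size(); ++i)
          w[i] -= eta * g[i];  // gradient step standing in for L-BFGS
      }
      prev_w = w;  // time-series anchor for the next minibatch
      first = false;
      printf("pass %d, batch %d: w = (%.3f, %.3f)\n", pass, (int)b, w[0], w[1]);
    }
  }
  return 0;
}

With T > 0 the second batch's solution is pulled toward the first batch's weights instead of jumping to its own optimum, which is the stabilizing effect the time-series term provides across noisy minibatches.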