From d9cc1a6986188a97e09e4c8cef46c34eee5f9cd2 Mon Sep 17 00:00:00 2001
From: Chris Dyer
Date: Sun, 10 Nov 2013 00:58:44 -0500
Subject: guard against direct includes of tr1

---
 training/crf/mpi_online_optimize.cc |  8 +--
 training/mira/kbest_cut_mira.cc     | 65 ++++++++++++------------
 training/mira/kbest_mira.cc         | 18 +++----
 training/mira/mira.py               | 98 +++++++++++++++++++------------
 training/pro/mr_pro_map.cc          |  1 -
 training/utils/candidate_set.cc     | 11 +++--
 training/utils/online_optimizer.h   |  8 +--
 training/utils/optimize_test.cc     |  6 +--
 8 files changed, 109 insertions(+), 106 deletions(-)

diff --git a/training/crf/mpi_online_optimize.cc b/training/crf/mpi_online_optimize.cc
index 9e1ae34c..6b5b7d64 100644
--- a/training/crf/mpi_online_optimize.cc
+++ b/training/crf/mpi_online_optimize.cc
@@ -4,11 +4,11 @@
 #include
 #include
 #include
-#include <tr1/memory>
 #include
 #include
 #include
+#include <boost/shared_ptr.hpp>

 #include "stringlib.h"
 #include "verbose.h"
@@ -219,7 +219,7 @@ int main(int argc, char** argv) {
 #endif
   if (size > 1) SetSilent(true);  // turn off verbose decoder output
   register_feature_functions();
-  std::tr1::shared_ptr<MT19937> rng;
+  boost::shared_ptr<MT19937> rng;

   po::variables_map conf;
   if (!InitCommandLine(argc, argv, &conf))
@@ -264,8 +264,8 @@ int main(int argc, char** argv) {
   ReadTrainingCorpus(conf["training_data"].as<string>(), rank, size, &corpus, &ids);
   assert(corpus.size() > 0);

-  std::tr1::shared_ptr<OnlineOptimizer> o;
-  std::tr1::shared_ptr<LearningRateSchedule> lr;
+  boost::shared_ptr<OnlineOptimizer> o;
+  boost::shared_ptr<LearningRateSchedule> lr;

   const unsigned size_per_proc = conf["minibatch_size_per_proc"].as<unsigned>();
   if (size_per_proc > corpus.size()) {
diff --git a/training/mira/kbest_cut_mira.cc b/training/mira/kbest_cut_mira.cc
index 59fa860a..3b1108e0 100644
--- a/training/mira/kbest_cut_mira.cc
+++ b/training/mira/kbest_cut_mira.cc
@@ -30,7 +30,6 @@
 #include "sparse_vector.h"

 using namespace std;
-using boost::shared_ptr;
 namespace po = boost::program_options;

 bool invert_score;
@@ -149,7 +148,7 @@ struct HypothesisInfo {
   double alpha;
   double oracle_loss;
   SparseVector<double> oracle_feat_diff;
-  shared_ptr<HypothesisInfo> oracleN;
+  boost::shared_ptr<HypothesisInfo> oracleN;
 };

 bool ApproxEqual(double a, double b) {
@@ -157,7 +156,7 @@ bool ApproxEqual(double a, double b) {
   return (fabs(a-b)/fabs(b)) < EPSILON;
 }

-typedef shared_ptr<HypothesisInfo> HI;
+typedef boost::shared_ptr<HypothesisInfo> HI;

 bool HypothesisCompareB(const HI& h1, const HI& h2 )
 {
   return h1->mt_metric > h2->mt_metric;
 };
@@ -185,11 +184,11 @@ bool HypothesisCompareG(const HI& h1, const HI& h2 )
 {
   return h1->mt_metric > h2->mt_metric;
 };

-void CuttingPlane(vector<shared_ptr<HypothesisInfo> >* cur_c, bool* again, vector<shared_ptr<HypothesisInfo> >& all_hyp, vector<weight_t> dense_weights)
+void CuttingPlane(vector<boost::shared_ptr<HypothesisInfo> >* cur_c, bool* again, vector<boost::shared_ptr<HypothesisInfo> >& all_hyp, vector<weight_t> dense_weights)
 {
   bool DEBUG_CUT = false;
-  shared_ptr<HypothesisInfo> max_fear, max_fear_in_set;
-  vector<shared_ptr<HypothesisInfo> >& cur_constraint = *cur_c;
+  boost::shared_ptr<HypothesisInfo> max_fear, max_fear_in_set;
+  vector<boost::shared_ptr<HypothesisInfo> >& cur_constraint = *cur_c;

   if(no_reweight)
     {
@@ -235,9 +234,9 @@
 }

-double ComputeDelta(vector<shared_ptr<HypothesisInfo> >* cur_p, double max_step_size,vector<weight_t> dense_weights )
+double ComputeDelta(vector<boost::shared_ptr<HypothesisInfo> >* cur_p, double max_step_size,vector<weight_t> dense_weights )
 {
-  vector<shared_ptr<HypothesisInfo> >& cur_pair = *cur_p;
+  vector<boost::shared_ptr<HypothesisInfo> >& cur_pair = *cur_p;
   double loss = cur_pair[0]->oracle_loss - cur_pair[1]->oracle_loss;
   double margin = -(cur_pair[0]->oracleN->features.dot(dense_weights)- cur_pair[0]->features.dot(dense_weights)) + (cur_pair[1]->oracleN->features.dot(dense_weights) - cur_pair[1]->features.dot(dense_weights));
@@ -261,12 +260,12 @@
 }
-vector<shared_ptr<HypothesisInfo> > SelectPair(vector<shared_ptr<HypothesisInfo> >* cur_c)
+vector<boost::shared_ptr<HypothesisInfo> > SelectPair(vector<boost::shared_ptr<HypothesisInfo> >* cur_c)
 {
   bool DEBUG_SELECT= false;
-  vector<shared_ptr<HypothesisInfo> >& cur_constraint = *cur_c;
+  vector<boost::shared_ptr<HypothesisInfo> >& cur_constraint = *cur_c;

-  vector<shared_ptr<HypothesisInfo> > pair;
+  vector<boost::shared_ptr<HypothesisInfo> > pair;

   if (no_select || optimizer == 2){ //skip heuristic search and return oracle and fear for pa-mira
@@ -278,7 +277,7 @@
   for(int u=0;u != cur_constraint.size();u++)
   {
-    shared_ptr<HypothesisInfo> max_fear;
+    boost::shared_ptr<HypothesisInfo> max_fear;

     if(DEBUG_SELECT) cerr<< "cur alpha " << u << " " << cur_constraint[u]->alpha;
     for(int i=0; i < cur_constraint.size();i++) //select maximal violator
@@ -323,8 +322,8 @@
 }

 struct GoodBadOracle {
-  vector<shared_ptr<HypothesisInfo> > good;
-  vector<shared_ptr<HypothesisInfo> > bad;
+  vector<boost::shared_ptr<HypothesisInfo> > good;
+  vector<boost::shared_ptr<HypothesisInfo> > bad;
 };

 struct BasicObserver: public DecoderObserver {
@@ -367,8 +366,8 @@ struct TrainingObserver : public DecoderObserver {
   const DocScorer& ds;
   vector<ScoreP>& corpus_bleu_sent_stats;
   vector<GoodBadOracle>& oracles;
-  vector<shared_ptr<HypothesisInfo> > cur_best;
-  shared_ptr<HypothesisInfo> cur_oracle;
+  vector<boost::shared_ptr<HypothesisInfo> > cur_best;
+  boost::shared_ptr<HypothesisInfo> cur_oracle;
   const int kbest_size;
   Hypergraph forest;
   int cur_sent;
@@ -386,7 +385,7 @@ struct TrainingObserver : public DecoderObserver {
     return *cur_best[0];
   }

-  const vector<shared_ptr<HypothesisInfo> > GetCurrentBest() const {
+  const vector<boost::shared_ptr<HypothesisInfo> > GetCurrentBest() const {
     return cur_best;
   }
@@ -411,8 +410,8 @@ struct TrainingObserver : public DecoderObserver {
   }

-  shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score, const vector<WordID>& hyp) {
-    shared_ptr<HypothesisInfo> h(new HypothesisInfo);
+  boost::shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score, const vector<WordID>& hyp) {
+    boost::shared_ptr<HypothesisInfo> h(new HypothesisInfo);
     h->features = feats;
     h->mt_metric = score;
     h->hyp = hyp;
@@ -424,14 +423,14 @@ struct TrainingObserver : public DecoderObserver {
     if (stream) sent_id = 0;
     bool PRINT_LIST= false;
-    vector<shared_ptr<HypothesisInfo> >& cur_good = oracles[sent_id].good;
-    vector<shared_ptr<HypothesisInfo> >& cur_bad = oracles[sent_id].bad;
+    vector<boost::shared_ptr<HypothesisInfo> >& cur_good = oracles[sent_id].good;
+    vector<boost::shared_ptr<HypothesisInfo> >& cur_bad = oracles[sent_id].bad;
     //TODO: look at keeping previous iterations hypothesis lists around
     cur_best.clear();
     cur_good.clear();
     cur_bad.clear();

-    vector<shared_ptr<HypothesisInfo> > all_hyp;
+    vector<boost::shared_ptr<HypothesisInfo> > all_hyp;

     typedef KBest::KBestDerivations<vector<WordID>, ESentenceTraversal,Filter> K;
     K kbest(forest,kbest_size);
@@ -527,7 +526,7 @@ struct TrainingObserver : public DecoderObserver {
     if(PRINT_LIST) { cerr << "GOOD" << endl; for(int u=0;u!=cur_good.size();u++) cerr << cur_good[u]->mt_metric << " " << cur_good[u]->hope << endl;}

     //use hope for fear selection
-    shared_ptr<HypothesisInfo>& oracleN = cur_good[0];
+    boost::shared_ptr<HypothesisInfo>& oracleN = cur_good[0];

     if(fear_select == 1){ //compute fear hyps with model - bleu
       if (PRINT_LIST) cerr << "FEAR " << endl;
@@ -663,13 +662,13 @@ int main(int argc, char** argv) {
     invert_score = false;
   }

-  shared_ptr<DocScorer> ds;
+  boost::shared_ptr<DocScorer> ds;
   //normal: load references, stream: start stream scorer
   if (stream) {
-    ds = shared_ptr<DocScorer>(new DocStreamScorer(type, vector<string>(0), ""));
+    ds = boost::shared_ptr<DocScorer>(new DocStreamScorer(type, vector<string>(0), ""));
     cerr << "Scoring doc stream with " << metric_name << endl;
   } else {
-    ds = shared_ptr<DocScorer>(new DocScorer(type, conf["reference"].as<vector<string> >(), ""));
+    ds = boost::shared_ptr<DocScorer>(new DocScorer(type, conf["reference"].as<vector<string> >(), ""));
     cerr << "Loaded " << ds->size() << " references for scoring with " << metric_name << endl;
   }
   vector<ScoreP> corpus_bleu_sent_stats;
@@ -774,9 +773,9 @@ int main(int argc, char** argv) {
       const HypothesisInfo& cur_good = *oracles[cur_sent].good[0];
       const HypothesisInfo& cur_bad = *oracles[cur_sent].bad[0];
-      vector<shared_ptr<HypothesisInfo> >& cur_good_v = oracles[cur_sent].good;
-      vector<shared_ptr<HypothesisInfo> >& cur_bad_v = oracles[cur_sent].bad;
-      vector<shared_ptr<HypothesisInfo> > cur_best_v = observer.GetCurrentBest();
+      vector<boost::shared_ptr<HypothesisInfo> >& cur_good_v = oracles[cur_sent].good;
+      vector<boost::shared_ptr<HypothesisInfo> >& cur_bad_v = oracles[cur_sent].bad;
+      vector<boost::shared_ptr<HypothesisInfo> > cur_best_v = observer.GetCurrentBest();

       tot_loss += cur_hyp.mt_metric;
@@ -824,13 +823,13 @@
       }
       else if(optimizer == 5) //full mira with n-best list of constraints from hope, fear, model best
       {
-        vector<shared_ptr<HypothesisInfo> > cur_constraint;
+        vector<boost::shared_ptr<HypothesisInfo> > cur_constraint;
         cur_constraint.insert(cur_constraint.begin(), cur_bad_v.begin(), cur_bad_v.end());
         cur_constraint.insert(cur_constraint.begin(), cur_best_v.begin(), cur_best_v.end());
         cur_constraint.insert(cur_constraint.begin(), cur_good_v.begin(), cur_good_v.end());

         bool optimize_again;
-        vector<shared_ptr<HypothesisInfo> > cur_pair;
+        vector<boost::shared_ptr<HypothesisInfo> > cur_pair;
         //SMO
         for(int u=0;u!=cur_constraint.size();u++)
           cur_constraint[u]->alpha =0;
@@ -879,7 +878,7 @@
       else if(optimizer == 2 || optimizer == 3) //PA and Cutting Plane MIRA update
       {
         bool DEBUG_SMO= true;
-        vector<shared_ptr<HypothesisInfo> > cur_constraint;
+        vector<boost::shared_ptr<HypothesisInfo> > cur_constraint;
         cur_constraint.push_back(cur_good_v[0]); //add oracle to constraint set
         bool optimize_again = true;
         int cut_plane_calls = 0;
@@ -919,7 +918,7 @@
           while (iter < smo_iter)
           {
             //select pair to optimize from constraint set
-            vector<shared_ptr<HypothesisInfo> > cur_pair = SelectPair(&cur_constraint);
+            vector<boost::shared_ptr<HypothesisInfo> > cur_pair = SelectPair(&cur_constraint);

             if(cur_pair.empty()){
               iter=MAX_SMO;
diff --git a/training/mira/kbest_mira.cc b/training/mira/kbest_mira.cc
index d59b4224..2868de0c 100644
--- a/training/mira/kbest_mira.cc
+++ b/training/mira/kbest_mira.cc
@@ -3,10 +3,10 @@
 #include
 #include
 #include
-#include <tr1/memory>
 #include
 #include
+#include <boost/shared_ptr.hpp>

 #include "stringlib.h"
 #include "hg_sampler.h"
@@ -30,7 +30,7 @@ using namespace std;
 namespace po = boost::program_options;

 bool invert_score;
-std::tr1::shared_ptr<MT19937> rng;
+boost::shared_ptr<MT19937> rng;

 void RandomPermutation(int len, vector<int>* p_ids) {
   vector<int>& ids = *p_ids;
@@ -88,8 +88,8 @@ struct HypothesisInfo {
 };

 struct GoodBadOracle {
-  std::tr1::shared_ptr<HypothesisInfo> good;
-  std::tr1::shared_ptr<HypothesisInfo> bad;
+  boost::shared_ptr<HypothesisInfo> good;
+  boost::shared_ptr<HypothesisInfo> bad;
 };

 struct TrainingObserver : public DecoderObserver {
@@ -97,7 +97,7 @@ struct TrainingObserver : public DecoderObserver {
   const DocumentScorer& ds;
   const EvaluationMetric& metric;
   vector<GoodBadOracle>& oracles;
-  std::tr1::shared_ptr<HypothesisInfo> cur_best;
+  boost::shared_ptr<HypothesisInfo> cur_best;
   const int kbest_size;
   const bool sample_forest;
@@ -109,16 +109,16 @@ struct TrainingObserver : public DecoderObserver {
     UpdateOracles(smeta.GetSentenceID(), *hg);
   }

-  std::tr1::shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score) {
-    std::tr1::shared_ptr<HypothesisInfo> h(new HypothesisInfo);
+  boost::shared_ptr<HypothesisInfo> MakeHypothesisInfo(const SparseVector<double>& feats, const double score) {
+    boost::shared_ptr<HypothesisInfo> h(new HypothesisInfo);
     h->features = feats;
     h->mt_metric = score;
     return h;
   }

   void UpdateOracles(int sent_id, const Hypergraph& forest) {
-    std::tr1::shared_ptr<HypothesisInfo>& cur_good = oracles[sent_id].good;
-    std::tr1::shared_ptr<HypothesisInfo>& cur_bad = oracles[sent_id].bad;
+    boost::shared_ptr<HypothesisInfo>& cur_good = oracles[sent_id].good;
+    boost::shared_ptr<HypothesisInfo>& cur_bad = oracles[sent_id].bad;
     cur_bad.reset();  // TODO get rid of??
     if (sample_forest) {
diff --git a/training/mira/mira.py b/training/mira/mira.py
index 29c51e1d..7b2d06a3 100755
--- a/training/mira/mira.py
+++ b/training/mira/mira.py
@@ -4,8 +4,17 @@ import subprocess, shlex, glob
 import argparse
 import logging
 import random, time
-import cdec.score
 import gzip, itertools
+try:
+  import cdec.score
+except ImportError:
+  sys.stderr.write('Could not import pycdec, see cdec/python/README.md for details\n')
+  sys.exit(1)
+have_mpl = True
+try:
+  import matplotlib.pyplot as plt
+except ImportError:
+  have_mpl = False

 #mira run script
 #requires pycdec to be built, since it is used for scoring hypothesis
@@ -16,17 +25,17 @@ import gzip, itertools
 #scoring function using pycdec scoring
 def fast_score(hyps, refs, metric):
   scorer = cdec.score.Scorer(metric)
-  logging.info('loaded {0} references for scoring with {1}\n'.format(
+  logging.info('loaded {0} references for scoring with {1}'.format(
                len(refs), metric))
   if metric=='BLEU':
     logging.warning('BLEU is ambiguous, assuming IBM_BLEU\n')
     metric = 'IBM_BLEU'
   elif metric=='COMBI':
     logging.warning('COMBI metric is no longer supported, switching to '
-                    'COMB:TER=-0.5;BLEU=0.5\n')
+                    'COMB:TER=-0.5;BLEU=0.5')
     metric = 'COMB:TER=-0.5;BLEU=0.5'
   stats = sum(scorer(r).evaluate(h) for h,r in itertools.izip(hyps,refs))
-  logging.info(stats.detail+'\n')
+  logging.info('Score={} ({})'.format(stats.score, stats.detail))
   return stats.score

 #create new parallel input file in output directory in sgml format
@@ -71,6 +80,8 @@ def main():
   #set logging to write all info messages to stderr
   logging.basicConfig(level=logging.INFO)
   script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
+  if not have_mpl:
+    logging.warning('Failed to import matplotlib, graphs will not be generated.')

   parser= argparse.ArgumentParser(
     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
@@ -181,10 +192,11 @@ def main():
     dev_size = enseg(args.devset, newdev, args.grammar_prefix)
     args.devset = newdev

-  write_config(args)
+  log_config(args)
   args.weights, hope_best_fear = optimize(args, script_dir, dev_size)

-  graph_file = graph(args.output_dir, hope_best_fear, args.metric)
+  graph_file = ''
+  if have_mpl: graph_file = graph(args.output_dir, hope_best_fear, args.metric)

   dev_results, dev_bleu = evaluate(args.devset, args.weights, args.config,
                                    script_dir, args.output_dir)
@@ -205,17 +217,12 @@ def main():

   if graph_file:
     logging.info('A graph of the best/hope/fear scores over the iterations '
-                 'has been saved to {}\n'.format(graph_file))
+                 'has been saved to {}'.format(graph_file))

   print 'final weights:\n{}\n'.format(args.weights)

 #graph of hope/best/fear metric values across all iterations
 def graph(output_dir, hope_best_fear, metric):
-  try:
-    import matplotlib.pyplot as plt
-  except ImportError:
-    logging.error('Error importing matplotlib. Graphing disabled.\n')
-    return ''
   max_y = float(max(hope_best_fear['best']))*1.5
   plt.plot(hope_best_fear['best'], label='best')
   plt.plot(hope_best_fear['hope'], label='hope')
   plt.plot(hope_best_fear['fear'], label='fear')
@@ -308,6 +315,7 @@ def optimize(args, script_dir, dev_size):
   decoder = script_dir+'/kbest_cut_mira'
   (source, refs) = split_devset(args.devset, args.output_dir)
   port = random.randint(15000,50000)
+  logging.info('using port {}'.format(port))
   num_features = 0
   last_p_score = 0
   best_score_iter = -1
@@ -316,8 +324,8 @@ def optimize(args, script_dir, dev_size):
   hope_best_fear = {'hope':[],'best':[],'fear':[]}
   #main optimization loop
   while i<args.max_iterations:
...
     if dec_score > best_score:
       best_score_iter = i
@@ -436,12 +441,13 @@ def optimize(args, script_dir, dev_size):
     new_weights_file = '{}/weights.{}'.format(args.output_dir, i+1)
     last_weights_file = '{}/weights.{}'.format(args.output_dir, i)
     i += 1
-    weight_files = weightdir+'/weights.mira-pass*.*[0-9].gz'
+    weight_files = args.output_dir+'/weights.pass*/weights.mira-pass*[0-9].gz'
     average_weights(new_weights_file, weight_files)

-  logging.info('\nBEST ITER: {} :: {}\n\n'.format(
+  logging.info('BEST ITERATION: {} (SCORE={})'.format(
               best_score_iter, best_score))
   weights_final = args.output_dir+'/weights.final'
+  logging.info('WEIGHTS FILE: {}'.format(weights_final))
   shutil.copy(last_weights_file, weights_final)
   average_final_weights(args.output_dir)
@@ -481,15 +487,15 @@ def gzip_file(filename):

 #average the weights for a given pass
 def average_weights(new_weights, weight_files):
-  logging.info('AVERAGE {} {}\n'.format(new_weights, weight_files))
+  logging.info('AVERAGE {} {}'.format(new_weights, weight_files))
   feature_weights = {}
   total_mult = 0.0
   for path in glob.glob(weight_files):
     score = gzip.open(path)
     mult = 0
-    logging.info('FILE {}\n'.format(path))
+    logging.info('  FILE {}'.format(path))
     msg, ran, mult = score.readline().strip().split(' ||| ')
-    logging.info('Processing {} {}'.format(ran, mult))
+    logging.info('  Processing {} {}'.format(ran, mult))
     for line in score:
       f,w = line.split(' ',1)
       if f in feature_weights:
@@ -500,34 +506,30 @@ def average_weights(new_weights, weight_files):
     score.close()

   #write new weights to outfile
+  logging.info('Writing averaged weights to {}'.format(new_weights))
   out = open(new_weights, 'w')
   for f in iter(feature_weights):
     avg = feature_weights[f]/total_mult
-    logging.info('{} {} {} ||| Printing {} {}\n'.format(f,feature_weights[f],
-                 total_mult, f, avg))
     out.write('{} {}\n'.format(f,avg))

-def write_config(args):
-  config = ('\n'
-            'DECODER: '
-            '/usr0/home/eschling/cdec/training/mira/kbest_cut_mira\n'
-            'INI FILE: '+args.config+'\n'
-            'WORKING DIRECTORY: '+args.output_dir+'\n'
-            'DEVSET: '+args.devset+'\n'
-            'EVAL METRIC: '+args.metric+'\n'
-            'MAX ITERATIONS: '+str(args.max_iterations)+'\n'
-            'DECODE NODES: '+str(args.jobs)+'\n'
-            'INITIAL WEIGHTS: '+args.weights+'\n')
+def log_config(args):
+  logging.info('WORKING DIRECTORY={}'.format(args.output_dir))
+  logging.info('INI FILE={}'.format(args.config))
+  logging.info('DEVSET={}'.format(args.devset))
+  logging.info('EVAL METRIC={}'.format(args.metric))
+  logging.info('MAX ITERATIONS={}'.format(args.max_iterations))
+  logging.info('PARALLEL JOBS={}'.format(args.jobs))
+  logging.info('INITIAL WEIGHTS={}'.format(args.weights))
   if args.grammar_prefix:
-    config += 'GRAMMAR PREFIX: '+str(args.grammar_prefix)+'\n'
+    logging.info('GRAMMAR PREFIX={}'.format(args.grammar_prefix))
   if args.test:
-    config += 'TEST SET: '+args.test+'\n'
+    logging.info('TEST SET={}'.format(args.test))
+  else:
+    logging.info('TEST SET=none specified')
   if args.test_config:
-    config += 'TEST CONFIG: '+args.test_config+'\n'
+    logging.info('TEST CONFIG={}'.format(args.test_config))
   if args.email:
-    config += 'EMAIL: '+args.email+'\n'
-
-  logging.info(config)
+    logging.info('EMAIL={}'.format(args.email))

 if __name__=='__main__':
   main()
diff --git a/training/pro/mr_pro_map.cc b/training/pro/mr_pro_map.cc
index eef40b8a..a5e6e48f 100644
--- a/training/pro/mr_pro_map.cc
+++ b/training/pro/mr_pro_map.cc
@@ -2,7 +2,6 @@
 #include
 #include
 #include
-#include

 #include
 #include
diff --git a/training/utils/candidate_set.cc b/training/utils/candidate_set.cc
index 087efec3..1dec9609 100644
--- a/training/utils/candidate_set.cc
+++ b/training/utils/candidate_set.cc
@@ -1,6 +1,11 @@
 #include "candidate_set.h"

-#include <tr1/unordered_set>
+#ifdef HAVE_CXX11
+# include <unordered_set>
+#else
+# include <tr1/unordered_set>
+namespace std { using std::tr1::unordered_set; }
+#endif

 #include
@@ -139,12 +144,12 @@ void CandidateSet::ReadFromFile(const string& file) {

 void CandidateSet::Dedup() {
   if(!SILENT) cerr << "Dedup in=" << cs.size();
-  tr1::unordered_set<Candidate, CandidateHasher, CandidateCompare> u;
+  unordered_set<Candidate, CandidateHasher, CandidateCompare> u;
   while(cs.size() > 0) {
     u.insert(cs.back());
     cs.pop_back();
   }
-  tr1::unordered_set<Candidate, CandidateHasher, CandidateCompare>::iterator it = u.begin();
+  unordered_set<Candidate, CandidateHasher, CandidateCompare>::iterator it = u.begin();
   while (it != u.end()) {
     cs.push_back(*it);
     it = u.erase(it);
diff --git a/training/utils/online_optimizer.h b/training/utils/online_optimizer.h
index 28d89344..19223e9d 100644
--- a/training/utils/online_optimizer.h
+++ b/training/utils/online_optimizer.h
@@ -1,10 +1,10 @@
 #ifndef _ONL_OPTIMIZE_H_
 #define _ONL_OPTIMIZE_H_

-#include <tr1/memory>
 #include
 #include
 #include
+#include <boost/shared_ptr.hpp>
 #include "sparse_vector.h"

 struct LearningRateSchedule {
@@ -56,7 +56,7 @@ struct ExponentialDecayLearningRate : public LearningRateSchedule {
 class OnlineOptimizer {
  public:
   virtual ~OnlineOptimizer();
-  OnlineOptimizer(const std::tr1::shared_ptr<LearningRateSchedule>& s,
+  OnlineOptimizer(const boost::shared_ptr<LearningRateSchedule>& s,
                   size_t batch_size,
                   const std::vector<int>& frozen_feats = std::vector<int>())
     : N_(batch_size),schedule_(s),k_() {
@@ -77,13 +77,13 @@ class OnlineOptimizer {
   std::set<int> frozen_;  // frozen (non-optimizing) features

  private:
-  std::tr1::shared_ptr<LearningRateSchedule> schedule_;
+  boost::shared_ptr<LearningRateSchedule> schedule_;
   int k_;  // iteration count
 };

 class CumulativeL1OnlineOptimizer : public OnlineOptimizer {
  public:
-  CumulativeL1OnlineOptimizer(const std::tr1::shared_ptr<LearningRateSchedule>& s,
+  CumulativeL1OnlineOptimizer(const boost::shared_ptr<LearningRateSchedule>& s,
                               size_t training_instances, double C,
                               const std::vector<int>& frozen) :
     OnlineOptimizer(s, training_instances, frozen), C_(C), u_() {}
diff --git a/training/utils/optimize_test.cc b/training/utils/optimize_test.cc
index bff2ca03..72fcef6d 100644
--- a/training/utils/optimize_test.cc
+++ b/training/utils/optimize_test.cc
@@ -2,6 +2,7 @@
 #include
 #include
 #include
+#include <boost/shared_ptr.hpp>
 #include "optimize.h"
 #include "online_optimizer.h"
 #include "sparse_vector.h"
@@ -96,14 +97,11 @@ void TestOptimizerVariants(int num_vars) {
   cerr << oa.Name() << " SUCCESS\n";
 }

-using namespace std::tr1;
-
 void TestOnline() {
   size_t N = 20;
   double C = 1.0;
   double eta0 = 0.2;
-  std::tr1::shared_ptr<LearningRateSchedule> r(new ExponentialDecayLearningRate(N, eta0, 0.85));
-  //shared_ptr<LearningRateSchedule> r(new StandardLearningRate(N, eta0));
+  boost::shared_ptr<LearningRateSchedule> r(new ExponentialDecayLearningRate(N, eta0, 0.85));
   CumulativeL1OnlineOptimizer opt(r, N, C, std::vector<int>());
   assert(r->eta(10) < r->eta(1));
 }
--
cgit v1.2.3
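
P.S. The candidate_set.cc hunk above shows the general shape of the guard the
subject line refers to: include the C++11 header when the build defines
HAVE_CXX11, fall back to the tr1 header otherwise, and alias the tr1 name into
std so call sites compile unchanged either way. A minimal self-contained sketch
of the same guard applied to unordered_map (HAVE_CXX11 is assumed to come from
the build system, as in the hunk above; the demo in main() is illustrative and
not part of this commit):

    #ifdef HAVE_CXX11
    # include <unordered_map>
    #else
    # include <tr1/unordered_map>
    // alias the tr1 name into std so callers can write std::unordered_map
    namespace std { using std::tr1::unordered_map; }
    #endif

    #include <iostream>
    #include <string>

    int main() {
      // resolves to std::unordered_map under C++11, std::tr1::unordered_map otherwise
      std::unordered_map<std::string, int> counts;
      counts["tr1"] += 1;
      std::cout << counts["tr1"] << std::endl;
      return 0;
    }

For shared_ptr no guard is needed at all, which is why the C++ files above
switch to boost::shared_ptr instead: that spelling works the same way whether
or not the standard library ships tr1.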