summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--pro-train/mr_pro_map.cc4
-rw-r--r--rampion/Makefile.am4
-rwxr-xr-xrampion/rampion.pl16
-rw-r--r--rampion/rampion_cccp.cc69
-rw-r--r--training/candidate_set.cc6
-rw-r--r--training/candidate_set.h21
-rw-r--r--training/mpi_flex_optimize.cc10
7 files changed, 78 insertions, 52 deletions
diff --git a/pro-train/mr_pro_map.cc b/pro-train/mr_pro_map.cc
index bb13fdf4..eef40b8a 100644
--- a/pro-train/mr_pro_map.cc
+++ b/pro-train/mr_pro_map.cc
@@ -101,8 +101,8 @@ void Sample(const unsigned gamma,
const size_t a = rng->inclusive(0, J_i.size() - 1)();
const size_t b = rng->inclusive(0, J_i.size() - 1)();
if (a == b) continue;
- float ga = metric->ComputeScore(J_i[a].score_stats);
- float gb = metric->ComputeScore(J_i[b].score_stats);
+ float ga = metric->ComputeScore(J_i[a].eval_feats);
+ float gb = metric->ComputeScore(J_i[b].eval_feats);
bool positive = gb < ga;
if (invert_score) positive = !positive;
const float gdiff = fabs(ga - gb);
diff --git a/rampion/Makefile.am b/rampion/Makefile.am
index 12df39c2..f4dbb7cc 100644
--- a/rampion/Makefile.am
+++ b/rampion/Makefile.am
@@ -1,6 +1,6 @@
bin_PROGRAMS = rampion_cccp
rampion_cccp_SOURCES = rampion_cccp.cc
-rampion_cccp_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a -lz
+rampion_cccp_LDADD = $(top_srcdir)/training/libtraining.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a -lz
-AM_CPPFLAGS = -W -Wall -Wno-sign-compare $(GTEST_CPPFLAGS) -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval -I$(top_srcdir)/training
+AM_CPPFLAGS = -W -Wall $(GTEST_CPPFLAGS) -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval -I$(top_srcdir)/training
diff --git a/rampion/rampion.pl b/rampion/rampion.pl
index 9884f453..55f7b3f1 100755
--- a/rampion/rampion.pl
+++ b/rampion/rampion.pl
@@ -65,12 +65,14 @@ my $cpbin=1;
my $tune_regularizer = 0;
my $reg = 500;
my $reg_previous = 5000;
+my $dont_accum = 0;
# Process command-line options
Getopt::Long::Configure("no_auto_abbrev");
if (GetOptions(
"jobs=i" => \$jobs,
"dont-clean" => \$disable_clean,
+ "dont-accumulate" => \$dont_accum,
"pass-suffix=s" => \$pass_suffix,
"qsub" => \$useqsub,
"dry-run" => \$dryrun,
@@ -163,8 +165,6 @@ my $decoderBase = check_output("basename $decoder"); chomp $decoderBase;
my $newIniFile = "$dir/$decoderBase.ini";
my $inputFileName = "$dir/input";
my $user = $ENV{"USER"};
-
-
# process ini file
-e $iniFile || die "Error: could not open $iniFile for reading\n";
open(INI, $iniFile);
@@ -229,6 +229,13 @@ close F;
unless($best_weights){ $best_weights = $weights; }
unless($projected_score){ $projected_score = 0.0; }
$seen_weights{$weights} = 1;
+my $kbest = "$dir/kbest";
+if ($dont_accum) {
+ $kbest = '';
+} else {
+ check_call("mkdir -p $kbest");
+ $kbest = "--kbest_repository $kbest";
+}
my $random_seed = int(time / 1000);
my $lastWeightsFile;
@@ -305,7 +312,7 @@ while (1){
$cmd="$MAPINPUT $dir/hgs > $dir/agenda.$im1";
print STDERR "COMMAND:\n$cmd\n";
check_call($cmd);
- $cmd="$MAPPER $refs_comma_sep -m $metric -i $dir/agenda.$im1 -w $inweights > $outweights";
+ $cmd="$MAPPER $refs_comma_sep -m $metric -i $dir/agenda.$im1 $kbest -w $inweights > $outweights";
check_call($cmd);
$lastWeightsFile = $outweights;
$iteration++;
@@ -445,6 +452,9 @@ General options:
--help
Print this message and exit.
+ --dont-accumulate
+ Don't accumulate k-best lists from multiple iterations.
+
--max-iterations <M>
Maximum number of iterations to run. If not specified, defaults
to $default_max_iter.
diff --git a/rampion/rampion_cccp.cc b/rampion/rampion_cccp.cc
index 7a6f1f0c..1e36dc51 100644
--- a/rampion/rampion_cccp.cc
+++ b/rampion/rampion_cccp.cc
@@ -14,6 +14,7 @@
#include "viterbi.h"
#include "ns.h"
#include "ns_docscorer.h"
+#include "candidate_set.h"
using namespace std;
namespace po = boost::program_options;
@@ -25,6 +26,7 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
("weights,w",po::value<string>(), "[REQD] Weights files from current iterations")
("input,i",po::value<string>()->default_value("-"), "Input file to map (- is STDIN)")
("evaluation_metric,m",po::value<string>()->default_value("IBM_BLEU"), "Evaluation metric (ibm_bleu, koehn_bleu, nist_bleu, ter, meteor, etc.)")
+ ("kbest_repository,R",po::value<string>(), "Accumulate k-best lists from previous iterations (parameter is path to repository)")
("kbest_size,k",po::value<unsigned>()->default_value(500u), "Top k-hypotheses to extract")
("cccp_iterations,I", po::value<unsigned>()->default_value(10u), "CCCP iterations (T')")
("ssd_iterations,J", po::value<unsigned>()->default_value(5u), "Stochastic subgradient iterations (T'')")
@@ -50,38 +52,36 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
}
}
-struct HypInfo {
- HypInfo() : g(-100.0f) {}
- HypInfo(const vector<WordID>& h,
- const SparseVector<weight_t>& feats,
- const SegmentEvaluator& scorer, const EvaluationMetric* metric) : hyp(h), x(feats) {
- SufficientStats ss;
- scorer.Evaluate(hyp, &ss);
- g = metric->ComputeScore(ss);
+struct GainFunction {
+ explicit GainFunction(const EvaluationMetric* m) : metric(m) {}
+ float operator()(const SufficientStats& eval_feats) const {
+ float g = metric->ComputeScore(eval_feats);
if (!metric->IsErrorMetric()) g = 1 - g;
+ return g;
}
-
- vector<WordID> hyp;
- float g;
- SparseVector<weight_t> x;
+ const EvaluationMetric* metric;
};
-void CostAugmentedSearch(const vector<HypInfo>& kbest,
+template <typename GainFunc>
+void CostAugmentedSearch(const GainFunc& gain,
+ const training::CandidateSet& cs,
const SparseVector<double>& w,
double alpha,
SparseVector<double>* fmap) {
unsigned best_i = 0;
double best = -numeric_limits<double>::infinity();
- for (unsigned i = 0; i < kbest.size(); ++i) {
- double s = kbest[i].x.dot(w) + alpha * kbest[i].g;
+ for (unsigned i = 0; i < cs.size(); ++i) {
+ double s = cs[i].fmap.dot(w) + alpha * gain(cs[i].eval_feats);
if (s > best) {
best = s;
best_i = i;
}
}
- *fmap = kbest[best_i].x;
+ *fmap = cs[best_i].fmap;
}
+
+
// runs lines 4--15 of rampion algorithm
int main(int argc, char** argv) {
po::variables_map conf;
@@ -97,6 +97,11 @@ int main(int argc, char** argv) {
Hypergraph hg;
string last_file;
ReadFile in_read(conf["input"].as<string>());
+ string kbest_repo;
+ if (conf.count("kbest_repository")) {
+ kbest_repo = conf["kbest_repository"].as<string>();
+ MkDirP(kbest_repo);
+ }
istream &in=*in_read.stream();
const unsigned kbest_size = conf["kbest_size"].as<unsigned>();
const unsigned tp = conf["cccp_iterations"].as<unsigned>();
@@ -112,40 +117,44 @@ int main(int argc, char** argv) {
Weights::InitSparseVector(vweights, &weights);
}
string line, file;
- vector<vector<HypInfo> > kis;
+ vector<training::CandidateSet> kis;
cerr << "Loading hypergraphs...\n";
while(getline(in, line)) {
istringstream is(line);
int sent_id;
kis.resize(kis.size() + 1);
- vector<HypInfo>& curkbest = kis.back();
+ training::CandidateSet& curkbest = kis.back();
is >> file >> sent_id;
+    string kbest_file;
+    if (kbest_repo.size()) {
+      ostringstream os;
+      os << kbest_repo << "/kbest." << sent_id << ".txt.gz";
+      kbest_file = os.str();
+      if (FileExists(kbest_file))
+        curkbest.ReadFromFile(kbest_file);
+    }
ReadFile rf(file);
if (kis.size() % 5 == 0) { cerr << '.'; }
if (kis.size() % 200 == 0) { cerr << " [" << kis.size() << "]\n"; }
HypergraphIO::ReadFromJSON(rf.stream(), &hg);
hg.Reweight(weights);
- KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> kbest(hg, kbest_size);
-
- for (int i = 0; i < kbest_size; ++i) {
- const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal>::Derivation* d =
- kbest.LazyKthBest(hg.nodes_.size() - 1, i);
- if (!d) break;
- curkbest.push_back(HypInfo(d->yield, d->feature_values, *ds[sent_id], metric));
- }
+ curkbest.AddKBestCandidates(hg, kbest_size, ds[sent_id]);
+ if (kbest_file.size())
+ curkbest.WriteToFile(kbest_file);
}
cerr << "\nHypergraphs loaded.\n";
vector<SparseVector<weight_t> > goals(kis.size()); // f(x_i,y+,h+)
SparseVector<weight_t> fear; // f(x,y-,h-)
+ const GainFunction gain(metric);
for (unsigned iterp = 1; iterp <= tp; ++iterp) {
cerr << "CCCP Iteration " << iterp << endl;
- for (int i = 0; i < goals.size(); ++i)
- CostAugmentedSearch(kis[i], weights, goodsign * alpha, &goals[i]);
+ for (unsigned i = 0; i < goals.size(); ++i)
+ CostAugmentedSearch(gain, kis[i], weights, goodsign * alpha, &goals[i]);
for (unsigned iterpp = 1; iterpp <= tpp; ++iterpp) {
cerr << " SSD Iteration " << iterpp << endl;
- for (int i = 0; i < goals.size(); ++i) {
- CostAugmentedSearch(kis[i], weights, badsign * alpha, &fear);
+ for (unsigned i = 0; i < goals.size(); ++i) {
+ CostAugmentedSearch(gain, kis[i], weights, badsign * alpha, &fear);
weights -= weights * (eta * reg / goals.size());
weights += (goals[i] - fear) * eta;
}
diff --git a/training/candidate_set.cc b/training/candidate_set.cc
index e2ca9ad2..8c086ece 100644
--- a/training/candidate_set.cc
+++ b/training/candidate_set.cc
@@ -112,7 +112,7 @@ void CandidateSet::WriteToFile(const string& file) const {
for (unsigned i = 0; i < cs.size(); ++i) {
out << TD::GetString(cs[i].ewords) << endl;
out << cs[i].fmap << endl;
- cs[i].score_stats.Encode(&ss);
+ cs[i].eval_feats.Encode(&ss);
out << ss << endl;
}
}
@@ -131,7 +131,7 @@ void CandidateSet::ReadFromFile(const string& file) {
cs.push_back(Candidate());
TD::ConvertSentence(cand, &cs.back().ewords);
ParseSparseVector(feats, 0, &cs.back().fmap);
- cs.back().score_stats = SufficientStats(ss);
+ cs.back().eval_feats = SufficientStats(ss);
}
cerr << " read " << cs.size() << " candidates\n";
}
@@ -160,7 +160,7 @@ void CandidateSet::AddKBestCandidates(const Hypergraph& hg, size_t kbest_size, c
if (!d) break;
cs.push_back(Candidate(d->yield, d->feature_values));
if (scorer)
- scorer->Evaluate(d->yield, &cs.back().score_stats);
+ scorer->Evaluate(d->yield, &cs.back().eval_feats);
}
Dedup();
}
diff --git a/training/candidate_set.h b/training/candidate_set.h
index 824a4de2..9d326ed0 100644
--- a/training/candidate_set.h
+++ b/training/candidate_set.h
@@ -15,16 +15,25 @@ namespace training {
struct Candidate {
Candidate() {}
Candidate(const std::vector<WordID>& e, const SparseVector<double>& fm) :
- ewords(e),
- fmap(fm) {}
- std::vector<WordID> ewords;
- SparseVector<double> fmap;
- SufficientStats score_stats;
+ ewords(e),
+ fmap(fm) {}
+ Candidate(const std::vector<WordID>& e,
+ const SparseVector<double>& fm,
+ const SegmentEvaluator& se) :
+ ewords(e),
+ fmap(fm) {
+ se.Evaluate(ewords, &eval_feats);
+ }
+
void swap(Candidate& other) {
- score_stats.swap(other.score_stats);
+ eval_feats.swap(other.eval_feats);
ewords.swap(other.ewords);
fmap.swap(other.fmap);
}
+
+ std::vector<WordID> ewords;
+ SparseVector<double> fmap;
+ SufficientStats eval_feats;
};
// represents some kind of collection of translation candidates, e.g.
diff --git a/training/mpi_flex_optimize.cc b/training/mpi_flex_optimize.cc
index a9197208..a9ead018 100644
--- a/training/mpi_flex_optimize.cc
+++ b/training/mpi_flex_optimize.cc
@@ -179,18 +179,16 @@ double ApplyRegularizationTerms(const double C,
const double T,
const vector<double>& weights,
const vector<double>& prev_weights,
- vector<double>* g) {
- assert(weights.size() == g->size());
+ double* g) {
double reg = 0;
for (size_t i = 0; i < weights.size(); ++i) {
const double prev_w_i = (i < prev_weights.size() ? prev_weights[i] : 0.0);
const double& w_i = weights[i];
- double& g_i = (*g)[i];
reg += C * w_i * w_i;
- g_i += 2 * C * w_i;
+ g[i] += 2 * C * w_i;
reg += T * (w_i - prev_w_i) * (w_i - prev_w_i);
- g_i += 2 * T * (w_i - prev_w_i);
+ g[i] += 2 * T * (w_i - prev_w_i);
}
return reg;
}
@@ -365,7 +363,7 @@ int main(int argc, char** argv) {
time_series_strength, // * (iter == 0 ? 0.0 : 1.0),
cur_weights,
prev_weights,
- &gg);
+ &gg[0]);
obj += r;
if (mi == 0 || mi == (minibatch_iterations - 1)) {
if (!mi) cerr << iter << ' '; else cerr << ' ';