path: root/training
author    Kenneth Heafield <github@kheafield.com>  2012-08-03 07:46:54 -0400
committer Kenneth Heafield <github@kheafield.com>  2012-08-03 07:46:54 -0400
commit    122f46c31102b683eaab3ad81a3a98accbc694bb (patch)
tree      8d499d789b159ebed25bb23b6983813d064a6296 /training
parent    ac664bdb0e481539cf77098a7dd0e1ec8d937ba0 (diff)
parent    193d137056c3c4f73d66f8db84691d63307de894 (diff)
Merge branch 'master' of github.com:redpony/cdec
Diffstat (limited to 'training')
-rw-r--r--  training/Makefile.am            |  4
-rw-r--r--  training/candidate_set.cc       |  9
-rw-r--r--  training/entropy.cc             | 41
-rw-r--r--  training/entropy.h              | 22
-rw-r--r--  training/grammar_convert.cc     | 27
-rw-r--r--  training/mpi_batch_optimize.cc  |  3
-rw-r--r--  training/risk.cc                | 45
-rw-r--r--  training/risk.h                 | 26
8 files changed, 171 insertions(+), 6 deletions(-)
diff --git a/training/Makefile.am b/training/Makefile.am
index 19ee8f0d..4cef0d5b 100644
--- a/training/Makefile.am
+++ b/training/Makefile.am
@@ -26,8 +26,10 @@ TESTS = lbfgs_test optimize_test
noinst_LIBRARIES = libtraining.a
libtraining_a_SOURCES = \
candidate_set.cc \
+ entropy.cc \
optimize.cc \
- online_optimizer.cc
+ online_optimizer.cc \
+ risk.cc
mpi_online_optimize_SOURCES = mpi_online_optimize.cc
mpi_online_optimize_LDADD = libtraining.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
diff --git a/training/candidate_set.cc b/training/candidate_set.cc
index 8c086ece..087efec3 100644
--- a/training/candidate_set.cc
+++ b/training/candidate_set.cc
@@ -4,6 +4,7 @@
#include <boost/functional/hash.hpp>
+#include "verbose.h"
#include "ns.h"
#include "filelib.h"
#include "wordid.h"
@@ -118,7 +119,7 @@ void CandidateSet::WriteToFile(const string& file) const {
}
void CandidateSet::ReadFromFile(const string& file) {
- cerr << "Reading candidates from " << file << endl;
+ if(!SILENT) cerr << "Reading candidates from " << file << endl;
ReadFile rf(file);
istream& in = *rf.stream();
string cand;
@@ -133,11 +134,11 @@ void CandidateSet::ReadFromFile(const string& file) {
ParseSparseVector(feats, 0, &cs.back().fmap);
cs.back().eval_feats = SufficientStats(ss);
}
- cerr << " read " << cs.size() << " candidates\n";
+ if(!SILENT) cerr << " read " << cs.size() << " candidates\n";
}
void CandidateSet::Dedup() {
- cerr << "Dedup in=" << cs.size();
+ if(!SILENT) cerr << "Dedup in=" << cs.size();
tr1::unordered_set<Candidate, CandidateHasher, CandidateCompare> u;
while(cs.size() > 0) {
u.insert(cs.back());
@@ -148,7 +149,7 @@ void CandidateSet::Dedup() {
cs.push_back(*it);
it = u.erase(it);
}
- cerr << " out=" << cs.size() << endl;
+ if(!SILENT) cerr << " out=" << cs.size() << endl;
}
void CandidateSet::AddKBestCandidates(const Hypergraph& hg, size_t kbest_size, const SegmentEvaluator* scorer) {
diff --git a/training/entropy.cc b/training/entropy.cc
new file mode 100644
index 00000000..4fdbe2be
--- /dev/null
+++ b/training/entropy.cc
@@ -0,0 +1,41 @@
+#include "entropy.h"
+
+#include "prob.h"
+#include "candidate_set.h"
+
+using namespace std;
+
+namespace training {
+
+// see Mann and McCallum "Efficient Computation of Entropy Gradient ..." for
+// a mostly clear derivation of:
+// g = E[ F(x,y) * log p(y|x) ] + H(y | x) * E[ F(x,y) ]
+double CandidateSetEntropy::operator()(const vector<double>& params,
+ SparseVector<double>* g) const {
+ prob_t z;
+ vector<double> dps(cands_.size());
+ for (unsigned i = 0; i < cands_.size(); ++i) {
+ dps[i] = cands_[i].fmap.dot(params);
+ const prob_t u(dps[i], init_lnx());
+ z += u;
+ }
+ const double log_z = log(z);
+
+ SparseVector<double> exp_feats;
+ double entropy = 0;
+ for (unsigned i = 0; i < cands_.size(); ++i) {
+ const double log_prob = dps[i] - log_z;
+ const double prob = exp(log_prob);
+ const double e_logprob = prob * log_prob;
+ entropy -= e_logprob;
+ if (g) {
+ (*g) += cands_[i].fmap * e_logprob;
+ exp_feats += cands_[i].fmap * prob;
+ }
+ }
+ if (g) (*g) += exp_feats * entropy;
+ return entropy;
+}
+
+}
+
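To make the computation above concrete, here is a self-contained sketch of the same two passes: log Z via log-sum-exp (standing in for prob_t's log-space accumulation), then entropy and gradient accumulated exactly as in the loop above. Toy feature values and plain std::map instead of SparseVector; nothing here is cdec code.

#include <cmath>
#include <cstdio>
#include <map>
#include <vector>

int main() {
  // Three toy candidates; fmap[i] maps feature id -> value, playing the
  // role of cands_[i].fmap above. Values are made up for illustration.
  std::vector<std::map<int, double> > fmap(3);
  fmap[0][0] = 1.0; fmap[0][1] = 2.0;
  fmap[1][0] = 0.5; fmap[1][2] = 1.0;
  fmap[2][1] = 1.0; fmap[2][2] = 2.0;
  std::vector<double> params(3, 0.1);

  // First pass: dot products, and log Z by log-sum-exp.
  std::vector<double> dps(fmap.size());
  double max_dp = -1e300;
  for (size_t i = 0; i < fmap.size(); ++i) {
    double s = 0;
    for (std::map<int, double>::const_iterator it = fmap[i].begin();
         it != fmap[i].end(); ++it)
      s += params[it->first] * it->second;
    dps[i] = s;
    if (s > max_dp) max_dp = s;
  }
  double sum = 0;
  for (size_t i = 0; i < dps.size(); ++i) sum += std::exp(dps[i] - max_dp);
  const double log_z = max_dp + std::log(sum);

  // Second pass: entropy = -sum_i p_i log p_i, and the gradient
  // g = sum_i f_i p_i log p_i + entropy * E[f], as in the loop above.
  std::map<int, double> g, exp_feats;
  double entropy = 0;
  for (size_t i = 0; i < fmap.size(); ++i) {
    const double log_prob = dps[i] - log_z;
    const double prob = std::exp(log_prob);
    entropy -= prob * log_prob;
    for (std::map<int, double>::const_iterator it = fmap[i].begin();
         it != fmap[i].end(); ++it) {
      g[it->first] += it->second * prob * log_prob;
      exp_feats[it->first] += it->second * prob;
    }
  }
  for (std::map<int, double>::const_iterator it = exp_feats.begin();
       it != exp_feats.end(); ++it)
    g[it->first] += it->second * entropy;

  std::printf("entropy = %g\n", entropy);
  for (std::map<int, double>::const_iterator it = g.begin(); it != g.end(); ++it)
    std::printf("g[%d] = %g\n", it->first, it->second);
  return 0;
}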
diff --git a/training/entropy.h b/training/entropy.h
new file mode 100644
index 00000000..796589ca
--- /dev/null
+++ b/training/entropy.h
@@ -0,0 +1,22 @@
+#ifndef _CSENTROPY_H_
+#define _CSENTROPY_H_
+
+#include <vector>
+#include "sparse_vector.h"
+
+namespace training {
+ class CandidateSet;
+
+ class CandidateSetEntropy {
+ public:
+ explicit CandidateSetEntropy(const CandidateSet& cs) : cands_(cs) {}
+ // computes the entropy (negative expected log probability) of a CandidateSet;
+ // if g is non-NULL, the gradient with respect to params is also written to *g
+ double operator()(const std::vector<double>& params,
+ SparseVector<double>* g = NULL) const;
+ private:
+ const CandidateSet& cands_;
+ };
+};
+
+#endif
diff --git a/training/grammar_convert.cc b/training/grammar_convert.cc
index bf8abb26..607a7cb9 100644
--- a/training/grammar_convert.cc
+++ b/training/grammar_convert.cc
@@ -9,6 +9,7 @@
#include <boost/lexical_cast.hpp>
#include <boost/program_options.hpp>
+#include "inside_outside.h"
#include "tdict.h"
#include "filelib.h"
#include "hg.h"
@@ -69,6 +70,32 @@ void FilterAndCheckCorrectness(int goal, Hypergraph* hg) {
if (hg->nodes_.size() != old_size) {
cerr << "Warning! During sorting " << (old_size - hg->nodes_.size()) << " disappeared!\n";
}
+ vector<double> inside; // inside score at each node
+ double p = Inside<double, TransitionCountWeightFunction>(*hg, &inside);
+ if (!p) {
+ cerr << "Warning! Grammar defines the empty language!\n";
+ hg->clear();
+ return;
+ }
+ vector<bool> prune(hg->edges_.size(), false);
+ int bad_edges = 0;
+ for (unsigned i = 0; i < hg->edges_.size(); ++i) {
+ Hypergraph::Edge& edge = hg->edges_[i];
+ bool bad = false;
+ for (unsigned j = 0; j < edge.tail_nodes_.size(); ++j) {
+ if (!inside[edge.tail_nodes_[j]]) {
+ bad = true;
+ ++bad_edges;
+ }
+ }
+ prune[i] = bad;
+ }
+ cerr << "Removing " << bad_edges << " bad edges from the grammar.\n";
+ for (unsigned i = 0; i < hg->edges_.size(); ++i) {
+ if (prune[i])
+ cerr << " " << hg->edges_[i].rule_->AsString() << endl;
+ }
+ hg->PruneEdges(prune);
}
void CreateEdge(const TRulePtr& r, const Hypergraph::TailNodeVector& tail, Hypergraph::Node* head_node, Hypergraph* hg) {
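The pruning step above runs the inside algorithm with a transition-count weight function, so inside[n] counts the derivations of node n; any edge with a zero-count tail can never participate in a complete derivation. Here is a self-contained C++11 sketch of the same idea on a toy hypergraph (names, node layout, and values are all illustrative, no cdec types):

#include <cstdio>
#include <vector>

// One hyperedge: derives `head` from the nodes in `tails` (empty = terminal).
struct Edge { int head; std::vector<int> tails; };

int main() {
  // Toy hypergraph with 4 nodes; node 2 has no incoming edge, so nothing
  // derives it. Edges are listed so that tails come before their heads.
  const int num_nodes = 4;
  std::vector<Edge> edges = {
    {0, {}},       // terminal rule derives node 0
    {1, {0}},      // node 0 derives node 1
    {3, {1}},      // good edge into the goal node 3
    {3, {1, 2}},   // bad edge: tail node 2 is underivable
  };

  // Inside pass with the transition-count weight: every edge weighs 1, so
  // inside[n] is the number of derivations of node n (0 = underivable).
  std::vector<double> inside(num_nodes, 0.0);
  for (size_t i = 0; i < edges.size(); ++i) {
    double w = 1.0;
    for (size_t j = 0; j < edges[i].tails.size(); ++j)
      w *= inside[edges[i].tails[j]];
    inside[edges[i].head] += w;
  }

  // Mark edges with an underivable tail, as FilterAndCheckCorrectness does.
  std::vector<bool> prune(edges.size(), false);
  int bad_edges = 0;
  for (size_t i = 0; i < edges.size(); ++i) {
    for (size_t j = 0; j < edges[i].tails.size(); ++j) {
      if (!inside[edges[i].tails[j]]) { prune[i] = true; ++bad_edges; break; }
    }
  }
  std::printf("Removing %d bad edges.\n", bad_edges);  // prints 1
  return 0;
}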
diff --git a/training/mpi_batch_optimize.cc b/training/mpi_batch_optimize.cc
index 0db062a7..6432f4a2 100644
--- a/training/mpi_batch_optimize.cc
+++ b/training/mpi_batch_optimize.cc
@@ -310,7 +310,8 @@ int main(int argc, char** argv) {
reduce(world, cllh_observer.acc_obj, test_objective, std::plus<double>(), 0);
reduce(world, cllh_observer.trg_words, test_total_words, std::plus<unsigned>(), 0);
#else
- test_objective = observer.acc_obj;
+ test_objective = cllh_observer.acc_obj;
+ test_total_words = cllh_observer.trg_words;
#endif
if (rank == 0) { // run optimizer only on rank=0 node
diff --git a/training/risk.cc b/training/risk.cc
new file mode 100644
index 00000000..d5a12cfd
--- /dev/null
+++ b/training/risk.cc
@@ -0,0 +1,45 @@
+#include "risk.h"
+
+#include "prob.h"
+#include "candidate_set.h"
+#include "ns.h"
+
+using namespace std;
+
+namespace training {
+
+// g = \sum_e p(e|f) * loss(e) * (phi(e,f) - E[phi(e,f)])
+double CandidateSetRisk::operator()(const vector<double>& params,
+ SparseVector<double>* g) const {
+ prob_t z;
+ for (unsigned i = 0; i < cands_.size(); ++i) {
+ const prob_t u(cands_[i].fmap.dot(params), init_lnx());
+ z += u;
+ }
+ const double log_z = log(z);
+
+ SparseVector<double> exp_feats;
+ if (g) {
+ for (unsigned i = 0; i < cands_.size(); ++i) {
+ const double log_prob = cands_[i].fmap.dot(params) - log_z;
+ const double prob = exp(log_prob);
+ exp_feats += cands_[i].fmap * prob;
+ }
+ }
+
+ double risk = 0;
+ for (unsigned i = 0; i < cands_.size(); ++i) {
+ const double log_prob = cands_[i].fmap.dot(params) - log_z;
+ const double prob = exp(log_prob);
+ const double cost = metric_.IsErrorMetric() ? metric_.ComputeScore(cands_[i].eval_feats)
+ : 1.0 - metric_.ComputeScore(cands_[i].eval_feats);
+ const double r = prob * cost;
+ risk += r;
+ if (g) (*g) += (cands_[i].fmap - exp_feats) * r;
+ }
+ return risk;
+}
+
+}
+
+
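The loop above implements the gradient stated in the comment, g = \sum_e p(e|f) * loss(e) * (phi(e,f) - E[phi(e,f)]). A self-contained sketch of the same computation with two toy candidates and dense features (the real code uses SparseVector and metric-derived costs; everything here is made up for illustration):

#include <cmath>
#include <cstdio>

int main() {
  // Two candidates over two features, each with a cost in [0,1]
  // (e.g. 1 - BLEU when the metric measures similarity).
  const double f[2][2] = {{1.0, 0.0}, {0.0, 1.0}};
  const double cost[2] = {0.2, 0.7};
  const double params[2] = {0.5, -0.5};

  // p_i proportional to exp(params . f_i)
  double dp[2], z = 0;
  for (int i = 0; i < 2; ++i) {
    dp[i] = params[0] * f[i][0] + params[1] * f[i][1];
    z += std::exp(dp[i]);
  }
  const double log_z = std::log(z);

  // E[f] under p, as in the exp_feats loop above
  double exp_feats[2] = {0, 0};
  for (int i = 0; i < 2; ++i) {
    const double prob = std::exp(dp[i] - log_z);
    exp_feats[0] += f[i][0] * prob;
    exp_feats[1] += f[i][1] * prob;
  }

  // risk = sum_i p_i cost_i;  g += (f_i - E[f]) * p_i * cost_i
  double risk = 0, g[2] = {0, 0};
  for (int i = 0; i < 2; ++i) {
    const double r = std::exp(dp[i] - log_z) * cost[i];
    risk += r;
    g[0] += (f[i][0] - exp_feats[0]) * r;
    g[1] += (f[i][1] - exp_feats[1]) * r;
  }
  std::printf("risk = %g  g = (%g, %g)\n", risk, g[0], g[1]);
  return 0;
}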
diff --git a/training/risk.h b/training/risk.h
new file mode 100644
index 00000000..2e8db0fb
--- /dev/null
+++ b/training/risk.h
@@ -0,0 +1,26 @@
+#ifndef _RISK_H_
+#define _RISK_H_
+
+#include <vector>
+#include "sparse_vector.h"
+class EvaluationMetric;
+
+namespace training {
+ class CandidateSet;
+
+ class CandidateSetRisk {
+ public:
+ explicit CandidateSetRisk(const CandidateSet& cs, const EvaluationMetric& metric) :
+ cands_(cs),
+ metric_(metric) {}
+ // computes the risk (expected loss) of a CandidateSet;
+ // if g is non-NULL, the gradient with respect to params is also written to *g
+ double operator()(const std::vector<double>& params,
+ SparseVector<double>* g = NULL) const;
+ private:
+ const CandidateSet& cands_;
+ const EvaluationMetric& metric_;
+ };
+};
+
+#endif
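A hypothetical driver for the new risk functor, sketching how per-sentence candidate sets might be scored and their gradients accumulated. The function name, file list, and corpus loop are illustrative and not part of this commit; only interfaces visible in this diff (CandidateSet::ReadFromFile, the functor's operator(), SparseVector's +=) are assumed:

#include <string>
#include <vector>
#include "candidate_set.h"
#include "risk.h"
#include "sparse_vector.h"

// Illustrative only: sum the expected loss (and its gradient) over one
// candidate set per sentence, as written earlier by CandidateSet::WriteToFile.
double CorpusRisk(const EvaluationMetric& metric,
                  const std::vector<std::string>& kbest_files,
                  const std::vector<double>& params,
                  SparseVector<double>* g) {
  double obj = 0;
  for (unsigned i = 0; i < kbest_files.size(); ++i) {
    training::CandidateSet cs;
    cs.ReadFromFile(kbest_files[i]);
    training::CandidateSetRisk risk(cs, metric);
    SparseVector<double> g_i;
    obj += risk(params, &g_i);  // expected loss for this sentence
    (*g) += g_i;
  }
  return obj;  // minimize with any gradient-based optimizer
}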