author     Patrick Simianer <simianer@cl.uni-heidelberg.de>    2012-07-08 14:26:51 +0200
committer  Patrick Simianer <simianer@cl.uni-heidelberg.de>    2012-07-08 14:26:51 +0200
commit     c139ce495861bb341e1b86a85ad4559f9ad53c14 (patch)
tree       1071839ee458f21f169ce06fc536fefe07e4c65d /training/entropy.cc
parent     3a94ac22e5c60aa205f2b3dadf81b0666500e0c3 (diff)
parent     d01e5b66d3010d61b9b56301fd7f302dd4ea5bc8 (diff)
Merge branch 'master' of github.com:pks/cdec-dtrain
Diffstat (limited to 'training/entropy.cc')
-rw-r--r--  training/entropy.cc  41
1 file changed, 41 insertions, 0 deletions
diff --git a/training/entropy.cc b/training/entropy.cc
new file mode 100644
index 00000000..4fdbe2be
--- /dev/null
+++ b/training/entropy.cc
@@ -0,0 +1,41 @@
+#include "entropy.h"
+
+#include "prob.h"
+#include "candidate_set.h"
+
+using namespace std;
+
+namespace training {
+
+// See Mann and McCallum, "Efficient Computation of Entropy Gradient ...", for a
+// mostly clear derivation of the gradient of the negative entropy -H(y|x):
+//   g = E[ F(x,y) * log p(y|x) ] + H(y|x) * E[ F(x,y) ]
+double CandidateSetEntropy::operator()(const vector<double>& params,
+                                       SparseVector<double>* g) const {
+  prob_t z;  // partition function Z, accumulated in log space
+  vector<double> dps(cands_.size());
+  for (unsigned i = 0; i < cands_.size(); ++i) {
+    dps[i] = cands_[i].fmap.dot(params);  // model score of candidate i
+    const prob_t u(dps[i], init_lnx());   // u = exp(dps[i]), held in log domain
+    z += u;                               // log-space sum avoids overflow
+  }
+  const double log_z = log(z);
+
+  SparseVector<double> exp_feats;  // accumulates E[ F(x,y) ]
+  double entropy = 0;
+  for (unsigned i = 0; i < cands_.size(); ++i) {
+    const double log_prob = dps[i] - log_z;  // reuse the dot product cached above
+    const double prob = exp(log_prob);
+    const double e_logprob = prob * log_prob;
+    entropy -= e_logprob;  // H = -sum_y p(y|x) log p(y|x)
+    if (g) {
+      (*g) += cands_[i].fmap * e_logprob;  // E[ F(x,y) * log p(y|x) ] term
+      exp_feats += cands_[i].fmap * prob;
+    }
+  }
+  if (g) (*g) += exp_feats * entropy;  // + H(y|x) * E[ F(x,y) ] term
+  return entropy;
+}
+
+}  // namespace training
+
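Reviewer's note on the gradient formula in the header comment: it follows from direct differentiation. A worked sketch, writing f_y for F(x,y), p_y for p(y|x), and \theta for params (notation chosen here for brevity, not taken from the source):

  % p_y = \exp(\theta \cdot f_y) / Z, hence
  \nabla_\theta \log p_y = f_y - \mathbb{E}[f], \qquad
  \nabla_\theta p_y = p_y \, (f_y - \mathbb{E}[f])

  % differentiate H = -\sum_y p_y \log p_y; the product-rule "+1" term
  % vanishes because \sum_y \nabla_\theta p_y = 0:
  \nabla_\theta H = -\sum_y (\log p_y + 1) \, \nabla_\theta p_y
                  = -\sum_y p_y \, (f_y - \mathbb{E}[f]) \log p_y
                  = -\mathbb{E}[f \log p] - H \, \mathbb{E}[f]

So g = \mathbb{E}[f \log p] + H \mathbb{E}[f] is exactly \nabla_\theta(-H): operator() returns the entropy H and fills *g with the gradient of the negative entropy, which is what the two accumulations in the second loop compute.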
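A usage sketch follows. Since g is the gradient of -H, stepping the parameters along g lowers the entropy of the candidate distribution, i.e. sharpens it. The driver below is hypothetical: it assumes only the interface visible in this diff plus cdec's usual conventions (params indexed by feature id; SparseVector<double> from sparse_vector.h iterable as (feature id, value) pairs); the names SharpenDistribution, steps, and eta are invented for illustration.

#include <iostream>
#include <vector>

#include "entropy.h"
#include "sparse_vector.h"

// Take a few gradient steps that reduce the entropy of the candidate
// distribution represented by `ent`. eta is a step size (hypothetical).
void SharpenDistribution(const training::CandidateSetEntropy& ent,
                         std::vector<double>* params,
                         unsigned steps, double eta) {
  for (unsigned t = 0; t < steps; ++t) {
    SparseVector<double> g;
    const double h = ent(*params, &g);  // h = H(y|x), g = gradient of -H
    std::cerr << "step " << t << ": entropy = " << h << std::endl;
    // g points in the direction of steepest ascent of -H, so moving
    // along g decreases the entropy H.
    for (SparseVector<double>::const_iterator it = g.begin();
         it != g.end(); ++it)
      (*params)[it->first] += eta * it->second;
  }
}

Passing a null gradient pointer skips this bookkeeping entirely, which is why both accumulations in operator() are guarded by if (g).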