-rw-r--r--  decoder/Makefile.am                                  |   6
-rw-r--r--  decoder/cdec_ff.cc                                   |  25
-rw-r--r--  decoder/ff_parse_match.cc                            | 218
-rw-r--r--  decoder/ff_parse_match.h                             |  25
-rw-r--r--  decoder/ff_soft_syntax.cc                            | 201
-rw-r--r--  decoder/ff_soft_syntax.h                             |  27
-rw-r--r--  decoder/ff_soft_syntax2.cc                           | 234
-rw-r--r--  decoder/ff_soft_syntax2.h                            |  27
-rw-r--r--  decoder/ff_source_syntax2.cc                         | 159
-rw-r--r--  decoder/ff_source_syntax2.h                          |  25
-rw-r--r--  decoder/ff_source_syntax2_p.cc                       | 166
-rw-r--r--  decoder/ff_source_syntax2_p.h                        |  25
-rw-r--r--  decoder/ff_source_syntax_p.cc                        | 245
-rw-r--r--  decoder/ff_source_syntax_p.h                         |  42
-rw-r--r--  training/dtrain/README.md                            |  11
-rw-r--r--  training/dtrain/dtrain.cc                            |  76
-rw-r--r--  training/dtrain/dtrain.h                             |  74
-rw-r--r--  training/dtrain/examples/parallelized/cdec.ini       |   2
-rw-r--r--  training/dtrain/examples/parallelized/dtrain.ini     |   2
-rw-r--r--  training/dtrain/examples/parallelized/work/out.0.0   |   9
-rw-r--r--  training/dtrain/examples/parallelized/work/out.0.1   |   9
-rw-r--r--  training/dtrain/examples/parallelized/work/out.1.0   |   9
-rw-r--r--  training/dtrain/examples/parallelized/work/out.1.1   |   9
-rw-r--r--  training/dtrain/examples/standard/dtrain.ini         |  24
-rw-r--r--  training/dtrain/examples/standard/expected-output    |  86
-rw-r--r--  training/dtrain/kbestget.h                           |  66
-rw-r--r--  training/dtrain/ksampler.h                           |   5
-rwxr-xr-x  training/dtrain/parallelize.rb                       |   7
-rw-r--r--  training/dtrain/score.h                              |  17
29 files changed, 1634 insertions(+), 197 deletions(-)
diff --git a/decoder/Makefile.am b/decoder/Makefile.am
index ef98289e..914faaea 100644
--- a/decoder/Makefile.am
+++ b/decoder/Makefile.am
@@ -143,7 +143,13 @@ libcdec_a_SOURCES = \
ff_csplit.cc \
ff_tagger.cc \
ff_source_path.cc \
+ ff_parse_match.cc \
+ ff_soft_syntax.cc \
+ ff_soft_syntax2.cc \
ff_source_syntax.cc \
+ ff_source_syntax_p.cc \
+ ff_source_syntax2.cc \
+ ff_source_syntax2_p.cc \
ff_bleu.cc \
ff_factory.cc \
incremental.cc \
diff --git a/decoder/cdec_ff.cc b/decoder/cdec_ff.cc
index 0bf441d4..e7b31f50 100644
--- a/decoder/cdec_ff.cc
+++ b/decoder/cdec_ff.cc
@@ -14,8 +14,18 @@
#include "ff_rules.h"
#include "ff_ruleshape.h"
#include "ff_bleu.h"
+#include "ff_soft_syntax.h"
+#include "ff_soft_syntax2.h"
#include "ff_source_path.h"
+
+#include "ff_parse_match.h"
#include "ff_source_syntax.h"
+#include "ff_source_syntax_p.h"
+#include "ff_source_syntax2.h"
+#include "ff_source_syntax2_p.h"
+
#include "ff_register.h"
#include "ff_charset.h"
#include "ff_wordset.h"
@@ -48,8 +58,23 @@ void register_feature_functions() {
ff_registry.Register("NgramFeatures", new FFFactory<NgramDetector>());
ff_registry.Register("RuleContextFeatures", new FFFactory<RuleContextFeatures>());
ff_registry.Register("RuleIdentityFeatures", new FFFactory<RuleIdentityFeatures>());
+
+ ff_registry.Register("ParseMatchFeatures", new FFFactory<ParseMatchFeatures>);
+
+ ff_registry.Register("SoftSyntacticFeatures", new FFFactory<SoftSyntacticFeatures>);
+ ff_registry.Register("SoftSyntacticFeatures2", new FFFactory<SoftSyntacticFeatures2>);
+
ff_registry.Register("SourceSyntaxFeatures", new FFFactory<SourceSyntaxFeatures>);
+ ff_registry.Register("SourceSyntaxFeatures2", new FFFactory<SourceSyntaxFeatures2>);
+
ff_registry.Register("SourceSpanSizeFeatures", new FFFactory<SourceSpanSizeFeatures>);
+
+ //ff_registry.Register("PSourceSyntaxFeatures", new FFFactory<PSourceSyntaxFeatures>);
+ //ff_registry.Register("PSourceSpanSizeFeatures", new FFFactory<PSourceSpanSizeFeatures>);
+ //ff_registry.Register("PSourceSyntaxFeatures2", new FFFactory<PSourceSyntaxFeatures2>);
+
ff_registry.Register("CMR2008ReorderingFeatures", new FFFactory<CMR2008ReorderingFeatures>());
ff_registry.Register("RuleSourceBigramFeatures", new FFFactory<RuleSourceBigramFeatures>());
ff_registry.Register("RuleTargetBigramFeatures", new FFFactory<RuleTargetBigramFeatures>());
diff --git a/decoder/ff_parse_match.cc b/decoder/ff_parse_match.cc
new file mode 100644
index 00000000..ed556b91
--- /dev/null
+++ b/decoder/ff_parse_match.cc
@@ -0,0 +1,218 @@
+#include "ff_parse_match.h"
+
+#include <sstream>
+#include <stack>
+#include <string>
+
+#include "sentence_metadata.h"
+#include "array2d.h"
+#include "filelib.h"
+
+using namespace std;
+
+// implements the parse match features as described in Vilar et al. (2008)
+// source trees must be represented in Penn Treebank format, e.g.
+// (S (NP John) (VP (V left)))
+
+struct ParseMatchFeaturesImpl {
+ ParseMatchFeaturesImpl(const string& param) {
+ if (param.compare("") != 0) {
+ char score_param = (char) param[0];
+ switch(score_param) {
+ case 'b':
+ scoring_method = 0;
+ break;
+ case 'l':
+ scoring_method = 1;
+ break;
+ case 'e':
+ scoring_method = 2;
+ break;
+ case 'r':
+ scoring_method = 3;
+ break;
+ default:
+ scoring_method = 0;
+ }
+ }
+ else {
+ scoring_method = 0;
+ }
+ }
+
+ void InitializeGrids(const string& tree, unsigned src_len) {
+ assert(tree.size() > 0);
+ //fids_cat.clear();
+ fids_ef.clear();
+ src_tree.clear();
+ //fids_cat.resize(src_len, src_len + 1);
+ fids_ef.resize(src_len, src_len + 1);
+ src_tree.resize(src_len, src_len + 1, TD::Convert("XX"));
+ ParseTreeString(tree, src_len);
+ }
+
+ void ParseTreeString(const string& tree, unsigned src_len) {
+ //cerr << "TREE: " << tree << endl;
+ src_sent_len = src_len;
+ stack<pair<int, WordID> > stk; // first = i, second = category
+ pair<int, WordID> cur_cat; cur_cat.first = -1;
+ unsigned i = 0;
+ unsigned p = 0;
+ while(p < tree.size()) {
+ const char cur = tree[p];
+ if (cur == '(') {
+ stk.push(cur_cat);
+ ++p;
+ unsigned k = p + 1;
+ while (k < tree.size() && tree[k] != ' ') { ++k; }
+ cur_cat.first = i;
+ cur_cat.second = TD::Convert(tree.substr(p, k - p));
+ // cerr << "NT: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ p = k + 1;
+ } else if (cur == ')') {
+ unsigned k = p;
+ while (k < tree.size() && tree[k] == ')') { ++k; }
+ const unsigned num_closes = k - p;
+ for (unsigned ci = 0; ci < num_closes; ++ci) {
+ // cur_cat.second spans from cur_cat.first to i
+ // cerr << TD::Convert(cur_cat.second) << " from " << cur_cat.first << " to " << i << endl;
+ // NOTE: unary rule chains end up being labeled with the top-most category
+ src_tree(cur_cat.first, i) = cur_cat.second;
+ cur_cat = stk.top();
+ stk.pop();
+ }
+ p = k;
+ while (p < tree.size() && (tree[p] == ' ' || tree[p] == '\t')) { ++p; }
+ } else if (cur == ' ' || cur == '\t') {
+ cerr << "Unexpected whitespace in: " << tree << endl;
+ abort();
+ } else { // terminal symbol
+ unsigned k = p + 1;
+ do {
+ while (k < tree.size() && tree[k] != ')' && tree[k] != ' ') { ++k; }
+ // cerr << "TERM: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ ++i;
+ assert(i <= src_len);
+ while (k < tree.size() && tree[k] == ' ') { ++k; }
+ p = k;
+ } while (p < tree.size() && tree[p] != ')');
+ }
+ //cerr << "i=" << i << " src_len=" << src_len << endl;
+ }
+ //cerr << "i=" << i << " src_len=" << src_len << endl;
+ assert(i == src_len); // make sure tree specified in src_tree is
+ // the same length as the source sentence
+ }
+
+ int FireFeatures(const TRule& rule, const int i, const int j, int* ants, SparseVector<double>* feats) {
+ //cerr << "fire features: " << rule.AsString() << " for " << i << "," << j << endl;
+ //cerr << rule << endl;
+ //cerr << "span: " << i << " " << j << endl;
+ const WordID lhs = src_tree(i,j);
+ int fid_ef = FD::Convert("PM");
+    int min_dist = j - i; // minimal distance to next syntactic constituent of this rule's LHS;
+                          // initialized so it is defined even when the search below never runs
+    int summed_min_dists; // minimal distances of LHS and NTs summed up
+    if (TD::Convert(lhs).compare("XX") != 0)
+      min_dist = 0;
+    else {
+      // compute the distance to the next syntactic constituent
+ int ok = 0;
+ for (unsigned int k = 1; k < (j - i); k++) {
+ min_dist = k;
+ for (unsigned int l = 0; l <= k; l++) {
+ // check if adding k words to the rule span will
+          // lead to a syntactic constituent
+ int l_add = i-l;
+ int r_add = j+(k-l);
+ //cerr << "Adding: " << l_add << " " << r_add << endl;
+ if ((l_add < src_tree.width() && r_add < src_tree.height()) && (TD::Convert(src_tree(l_add, r_add)).compare("XX") != 0)) {
+ //cerr << TD::Convert(src_tree(i-l,j+(k-l))) << endl;
+ //cerr << "span_add: " << l_add << " " << r_add << endl;
+ ok = 1;
+ break;
+ }
+ // check if removing k words from the rule span will
+          // lead to a syntactic constituent
+ else {
+          //cerr << "Help...!" << endl;
+          int l_rem = i+l;
+ int r_rem = j-(k-l);
+ //cerr << "Removing: " << l_rem << " " << r_rem << endl;
+ if ((l_rem < src_tree.width() && r_rem < src_tree.height()) && TD::Convert(src_tree(l_rem, r_rem)).compare("XX") != 0) {
+ //cerr << TD::Convert(src_tree(i+l,j-(k-l))) << endl;
+ //cerr << "span_rem: " << l_rem << " " << r_rem << endl;
+ ok = 1;
+ break;
+ }
+ }
+ }
+ if (ok) break;
+ }
+ }
+ summed_min_dists = min_dist;
+ //cerr << min_dist << endl;
+ unsigned ntc = 0;
+ for (unsigned k = 0; k < rule.f_.size(); ++k) {
+ int fj = rule.f_[k];
+ if (fj <= 0)
+ summed_min_dists += ants[ntc++];
+ }
+ switch(scoring_method) {
+ case 0:
+ // binary scoring
+ feats->set_value(fid_ef, (summed_min_dists == 0));
+ break;
+ // CHECK: for the remaining scoring methods, the question remains if
+ // min_dist or summed_min_dists should be used
+ case 1:
+ // linear scoring
+ feats->set_value(fid_ef, 1.0/(min_dist+1));
+ break;
+ case 2:
+ // exponential scoring
+ feats->set_value(fid_ef, 1.0/exp(min_dist));
+ break;
+ case 3:
+ // relative scoring
+      feats->set_value(fid_ef, (double)(j-i)/((j-i) + min_dist)); // cast: plain int division would only yield 0 or 1
+ break;
+ default:
+ // binary scoring in case nothing is defined
+ feats->set_value(fid_ef, (summed_min_dists == 0));
+ }
+ return min_dist;
+ }
+
+ Array2D<WordID> src_tree; // src_tree(i,j) NT = type
+ unsigned int src_sent_len;
+ mutable Array2D<map<const TRule*, int> > fids_ef; // fires for fully lexicalized
+ int scoring_method;
+};
+
+ParseMatchFeatures::ParseMatchFeatures(const string& param) :
+ FeatureFunction(sizeof(WordID)) {
+ impl = new ParseMatchFeaturesImpl(param);
+}
+
+ParseMatchFeatures::~ParseMatchFeatures() {
+ delete impl;
+ impl = NULL;
+}
+
+void ParseMatchFeatures::TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const {
+ int ants[8];
+ for (unsigned i = 0; i < ant_contexts.size(); ++i)
+ ants[i] = *static_cast<const int*>(ant_contexts[i]);
+
+ *static_cast<int*>(context) =
+ impl->FireFeatures(*edge.rule_, edge.i_, edge.j_, ants, features);
+}
+
+void ParseMatchFeatures::PrepareForInput(const SentenceMetadata& smeta) {
+ impl->InitializeGrids(smeta.GetSGMLValue("src_tree"), smeta.GetSourceLength());
+}
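To make the span chart concrete: for the example tree in the header comment above, ParseTreeString labels each bracketed span (i,j) with its category -- unary chains keep the top-most label, per the NOTE -- and every other span keeps the default "XX". A standalone sketch of the resulting chart, using a plain std::map in place of cdec's Array2D/TD (not part of the patch):

```
#include <iostream>
#include <map>
#include <string>
#include <utility>

int main() {
  // Chart for "(S (NP John) (VP (V left)))" over the 2-word sentence "John left":
  std::map<std::pair<int, int>, std::string> src_tree;
  src_tree[{0, 1}] = "NP";  // "John"
  src_tree[{1, 2}] = "VP";  // unary chain V -> VP keeps the top-most label
  src_tree[{0, 2}] = "S";   // the whole sentence
  // In longer sentences, spans matching no constituent keep the default "XX".
  for (const auto& kv : src_tree)
    std::cout << "(" << kv.first.first << "," << kv.first.second << ") = "
              << kv.second << "\n";
}
```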
diff --git a/decoder/ff_parse_match.h b/decoder/ff_parse_match.h
new file mode 100644
index 00000000..fa73481a
--- /dev/null
+++ b/decoder/ff_parse_match.h
@@ -0,0 +1,25 @@
+#ifndef _FF_PARSE_MATCH_H_
+#define _FF_PARSE_MATCH_H_
+
+#include "ff.h"
+#include "hg.h"
+
+struct ParseMatchFeaturesImpl;
+
+class ParseMatchFeatures : public FeatureFunction {
+ public:
+ ParseMatchFeatures(const std::string& param);
+ ~ParseMatchFeatures();
+ protected:
+ virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const std::vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const;
+ virtual void PrepareForInput(const SentenceMetadata& smeta);
+ private:
+ ParseMatchFeaturesImpl* impl;
+};
+
+#endif
diff --git a/decoder/ff_soft_syntax.cc b/decoder/ff_soft_syntax.cc
new file mode 100644
index 00000000..9981fa45
--- /dev/null
+++ b/decoder/ff_soft_syntax.cc
@@ -0,0 +1,201 @@
+#include "ff_soft_syntax.h"
+
+#include <cstdio>
+#include <sstream>
+#include <stack>
+#include <string>
+#include <vector>
+
+#include "sentence_metadata.h"
+#include "stringlib.h"
+#include "array2d.h"
+#include "filelib.h"
+
+using namespace std;
+
+// Implements the soft syntactic features described in
+// Marton and Resnik (2008): "Soft Syntactic Constraints for Hierarchical Phrase-Based Translation".
+// Source trees must be represented in Penn Treebank format,
+// e.g. (S (NP John) (VP (V left))).
+
+struct SoftSyntacticFeaturesImpl {
+ SoftSyntacticFeaturesImpl(const string& param) {
+ vector<string> labels = SplitOnWhitespace(param);
+    //for (unsigned int i = 0; i < labels.size(); i++)
+    //  cerr << "Labels: " << labels.at(i) << endl;
+ for (unsigned int i = 0; i < labels.size(); i++) {
+ string label = labels.at(i);
+ pair<string, string> feat_label;
+ feat_label.first = label.substr(0, label.size() - 1);
+ feat_label.second = label.at(label.size() - 1);
+ feat_labels.push_back(feat_label);
+ }
+  }
+
+ void InitializeGrids(const string& tree, unsigned src_len) {
+ assert(tree.size() > 0);
+ //fids_cat.clear();
+ fids_ef.clear();
+ src_tree.clear();
+ //fids_cat.resize(src_len, src_len + 1);
+ fids_ef.resize(src_len, src_len + 1);
+ src_tree.resize(src_len, src_len + 1, TD::Convert("XX"));
+ ParseTreeString(tree, src_len);
+ }
+
+ void ParseTreeString(const string& tree, unsigned src_len) {
+ stack<pair<int, WordID> > stk; // first = i, second = category
+ pair<int, WordID> cur_cat; cur_cat.first = -1;
+ unsigned i = 0;
+ unsigned p = 0;
+ //cerr << "String " << tree << endl;
+ while(p < tree.size()) {
+ const char cur = tree[p];
+ if (cur == '(') {
+ stk.push(cur_cat);
+ ++p;
+ unsigned k = p + 1;
+ while (k < tree.size() && tree[k] != ' ') { ++k; }
+ cur_cat.first = i;
+ cur_cat.second = TD::Convert(tree.substr(p, k - p));
+ //cerr << "NT: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ p = k + 1;
+ } else if (cur == ')') {
+ unsigned k = p;
+ while (k < tree.size() && tree[k] == ')') { ++k; }
+ const unsigned num_closes = k - p;
+ for (unsigned ci = 0; ci < num_closes; ++ci) {
+ // cur_cat.second spans from cur_cat.first to i
+ //cerr << TD::Convert(cur_cat.second) << " from " << cur_cat.first << " to " << i << endl;
+ // NOTE: unary rule chains end up being labeled with the top-most category
+ src_tree(cur_cat.first, i) = cur_cat.second;
+ cur_cat = stk.top();
+ stk.pop();
+ }
+ p = k;
+ while (p < tree.size() && (tree[p] == ' ' || tree[p] == '\t')) { ++p; }
+ } else if (cur == ' ' || cur == '\t') {
+ cerr << "Unexpected whitespace in: " << tree << endl;
+ abort();
+ } else { // terminal symbol
+ unsigned k = p + 1;
+ do {
+ while (k < tree.size() && tree[k] != ')' && tree[k] != ' ') { ++k; }
+ // cerr << "TERM: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ ++i;
+ assert(i <= src_len);
+ while (k < tree.size() && tree[k] == ' ') { ++k; }
+ p = k;
+ } while (p < tree.size() && tree[p] != ')');
+ }
+ }
+ //cerr << "i=" << i << " src_len=" << src_len << endl;
+ assert(i == src_len); // make sure tree specified in src_tree is
+ // the same length as the source sentence
+ }
+
+ WordID FireFeatures(const TRule& rule, const int i, const int j, const WordID* ants, SparseVector<double>* feats) {
+ //cerr << "fire features: " << rule.AsString() << " for " << i << "," << j << endl;
+ const WordID lhs = src_tree(i,j);
+ string lhs_str = TD::Convert(lhs);
+ //cerr << "LHS: " << lhs_str << " from " << i << " to " << j << endl;
+ //cerr << "RULE :"<< rule << endl;
+ int& fid_ef = fids_ef(i,j)[&rule];
+ for (unsigned int i = 0; i < feat_labels.size(); i++) {
+ ostringstream os;
+ string label = feat_labels.at(i).first;
+ //cerr << "This Label: " << label << endl;
+ char feat_type = (char) feat_labels.at(i).second.c_str()[0];
+ //cerr << "feat_type: " << feat_type << endl;
+ switch(feat_type) {
+ case '2':
+ if (lhs_str.compare(label) == 0) {
+ os << "SYN:" << label << "_conform";
+ }
+ else {
+ os << "SYN:" << label << "_cross";
+ }
+ fid_ef = FD::Convert(os.str());
+ if (fid_ef > 0) {
+ //cerr << "Feature :" << os.str() << endl;
+ feats->set_value(fid_ef, 1.0);
+ }
+ break;
+ case '_':
+ os << "SYN:" << label;
+ fid_ef = FD::Convert(os.str());
+ if (lhs_str.compare(label) == 0) {
+ if (fid_ef > 0) {
+ //cerr << "Feature: " << os.str() << endl;
+ feats->set_value(fid_ef, 1.0);
+ }
+ }
+ else {
+ if (fid_ef > 0) {
+ //cerr << "Feature: " << os.str() << endl;
+ feats->set_value(fid_ef, -1.0);
+ }
+ }
+ break;
+ case '+':
+ if (lhs_str.compare(label) == 0) {
+ os << "SYN:" << label << "_conform";
+ fid_ef = FD::Convert(os.str());
+ if (fid_ef > 0) {
+ //cerr << "Feature: " << os.str() << endl;
+ feats->set_value(fid_ef, 1.0);
+ }
+ }
+ break;
+ case '-':
+ //cerr << "-" << endl;
+ if (lhs_str.compare(label) != 0) {
+ os << "SYN:" << label << "_cross";
+ fid_ef = FD::Convert(os.str());
+ if (fid_ef > 0) {
+ //cerr << "Feature :" << os.str() << endl;
+ feats->set_value(fid_ef, 1.0);
+ }
+ }
+ break;
+ }
+ //cerr << "Feature: " << os.str() << endl;
+ //cerr << endl;
+ }
+ return lhs;
+ }
+
+ Array2D<WordID> src_tree; // src_tree(i,j) NT = type
+ mutable Array2D<map<const TRule*, int> > fids_ef; // fires for fully lexicalized
+ vector<pair<string, string> > feat_labels;
+};
+
+SoftSyntacticFeatures::SoftSyntacticFeatures(const string& param) :
+ FeatureFunction(sizeof(WordID)) {
+ impl = new SoftSyntacticFeaturesImpl(param);
+}
+
+SoftSyntacticFeatures::~SoftSyntacticFeatures() {
+ delete impl;
+ impl = NULL;
+}
+
+void SoftSyntacticFeatures::TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const {
+ WordID ants[8];
+ for (unsigned i = 0; i < ant_contexts.size(); ++i)
+ ants[i] = *static_cast<const WordID*>(ant_contexts[i]);
+
+ *static_cast<WordID*>(context) =
+ impl->FireFeatures(*edge.rule_, edge.i_, edge.j_, ants, features);
+}
+
+void SoftSyntacticFeatures::PrepareForInput(const SentenceMetadata& smeta) {
+ impl->InitializeGrids(smeta.GetSGMLValue("src_tree"), smeta.GetSourceLength());
+}
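The parameter string is a whitespace-separated list of tokens whose final character selects the feature type: '2' fires SYN:&lt;label&gt;_conform on a match and SYN:&lt;label&gt;_cross otherwise, '_' fires SYN:&lt;label&gt; with value +1/-1, '+' fires only the _conform feature, and '-' only the _cross feature. A minimal standalone sketch of that tokenization, with invented label values (not part of the patch):

```
#include <iostream>
#include <sstream>
#include <string>

int main() {
  std::string param = "NP+ VP- S_ PP2";  // hypothetical configuration string
  std::istringstream in(param);
  std::string tok;
  while (in >> tok) {
    std::string label = tok.substr(0, tok.size() - 1);  // syntactic label
    char feat_type = tok[tok.size() - 1];               // one of '2', '_', '+', '-'
    std::cout << label << " -> type '" << feat_type << "'\n";
  }
}
```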
diff --git a/decoder/ff_soft_syntax.h b/decoder/ff_soft_syntax.h
new file mode 100644
index 00000000..79352f49
--- /dev/null
+++ b/decoder/ff_soft_syntax.h
@@ -0,0 +1,27 @@
+#ifndef _FF_SOFTSYNTAX_H_
+#define _FF_SOFTSYNTAX_H_
+
+#include "ff.h"
+#include "hg.h"
+
+struct SoftSyntacticFeaturesImpl;
+
+class SoftSyntacticFeatures : public FeatureFunction {
+ public:
+ SoftSyntacticFeatures(const std::string& param);
+ ~SoftSyntacticFeatures();
+ protected:
+ virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const std::vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const;
+ virtual void PrepareForInput(const SentenceMetadata& smeta);
+ private:
+ SoftSyntacticFeaturesImpl* impl;
+};
+
+#endif
diff --git a/decoder/ff_soft_syntax2.cc b/decoder/ff_soft_syntax2.cc
new file mode 100644
index 00000000..121bc39b
--- /dev/null
+++ b/decoder/ff_soft_syntax2.cc
@@ -0,0 +1,234 @@
+#include "ff_soft_syntax2.h"
+
+#include <cstdio>
+#include <sstream>
+#include <stack>
+#include <string>
+#include <vector>
+
+#include "sentence_metadata.h"
+#include "stringlib.h"
+#include "array2d.h"
+#include "filelib.h"
+
+using namespace std;
+
+// Implements the soft syntactic features described in
+// Marton and Resnik (2008): "Soft Syntactic Constraints for Hierarchical Phrase-Based Translation".
+// Source trees must be represented in Penn Treebank format,
+// e.g. (S (NP John) (VP (V left))).
+
+struct SoftSyntacticFeatures2Impl {
+ SoftSyntacticFeatures2Impl(const string& param) {
+ vector<string> labels = SplitOnWhitespace(param);
+ //for (unsigned int i = 0; i < labels.size(); i++)
+ //cerr << "Labels: " << labels.at(i) << endl;
+ for (unsigned int i = 0; i < labels.size(); i++) {
+ string label = labels.at(i);
+ pair<string, string> feat_label;
+ feat_label.first = label.substr(0, label.size() - 1);
+ feat_label.second = label.at(label.size() - 1);
+ feat_labels.push_back(feat_label);
+ }
+ }
+
+ void InitializeGrids(const string& tree, unsigned src_len) {
+ assert(tree.size() > 0);
+ //fids_cat.clear();
+ fids_ef.clear();
+ src_tree.clear();
+ //fids_cat.resize(src_len, src_len + 1);
+ fids_ef.resize(src_len, src_len + 1);
+ src_tree.resize(src_len, src_len + 1, TD::Convert("XX"));
+ ParseTreeString(tree, src_len);
+ }
+
+ void ParseTreeString(const string& tree, unsigned src_len) {
+ stack<pair<int, WordID> > stk; // first = i, second = category
+ pair<int, WordID> cur_cat; cur_cat.first = -1;
+ unsigned i = 0;
+ unsigned p = 0;
+ //cerr << "String " << tree << endl;
+ while(p < tree.size()) {
+ const char cur = tree[p];
+ if (cur == '(') {
+ stk.push(cur_cat);
+ ++p;
+ unsigned k = p + 1;
+ while (k < tree.size() && tree[k] != ' ') { ++k; }
+ cur_cat.first = i;
+ cur_cat.second = TD::Convert(tree.substr(p, k - p));
+ //cerr << "NT: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ p = k + 1;
+ } else if (cur == ')') {
+ unsigned k = p;
+ while (k < tree.size() && tree[k] == ')') { ++k; }
+ const unsigned num_closes = k - p;
+ for (unsigned ci = 0; ci < num_closes; ++ci) {
+ // cur_cat.second spans from cur_cat.first to i
+ //cerr << TD::Convert(cur_cat.second) << " from " << cur_cat.first << " to " << i << endl;
+ // NOTE: unary rule chains end up being labeled with the top-most category
+ src_tree(cur_cat.first, i) = cur_cat.second;
+ cur_cat = stk.top();
+ stk.pop();
+ }
+ p = k;
+ while (p < tree.size() && (tree[p] == ' ' || tree[p] == '\t')) { ++p; }
+ } else if (cur == ' ' || cur == '\t') {
+ cerr << "Unexpected whitespace in: " << tree << endl;
+ abort();
+ } else { // terminal symbol
+ unsigned k = p + 1;
+ do {
+ while (k < tree.size() && tree[k] != ')' && tree[k] != ' ') { ++k; }
+ // cerr << "TERM: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ ++i;
+ assert(i <= src_len);
+ while (k < tree.size() && tree[k] == ' ') { ++k; }
+ p = k;
+ } while (p < tree.size() && tree[p] != ')');
+ }
+ }
+ //cerr << "i=" << i << " src_len=" << src_len << endl;
+ assert(i == src_len); // make sure tree specified in src_tree is
+ // the same length as the source sentence
+ }
+
+ WordID FireFeatures(const TRule& rule, const int i, const int j, const WordID* ants, SparseVector<double>* feats) {
+ //cerr << "fire features: " << rule.AsString() << " for " << i << "," << j << endl;
+ const WordID lhs = src_tree(i,j);
+ string lhs_str = TD::Convert(lhs);
+ //cerr << "LHS: " << lhs_str << " from " << i << " to " << j << endl;
+ //cerr << "RULE :"<< rule << endl;
+ int& fid_ef = fids_ef(i,j)[&rule];
+ string lhs_to_str = TD::Convert(lhs);
+    int min_dist = j - i; // initialized so it is defined even when the search below never runs
+ string min_dist_label;
+ if (lhs_to_str.compare("XX") != 0) {
+ min_dist = 0;
+ min_dist_label = lhs_to_str;
+ }
+ else {
+ int ok = 0;
+ for (unsigned int k = 1; k < (j - i); k++) {
+ min_dist = k;
+ for (unsigned int l = 0; l <= k; l++) {
+ int l_add = i-l;
+ int r_add = j+(k-l);
+ if ((l_add < src_tree.width() && r_add < src_tree.height()) && (TD::Convert(src_tree(l_add, r_add)).compare("XX") != 0)) {
+ ok = 1;
+ min_dist_label = (TD::Convert(src_tree(l_add, r_add)));
+ break;
+ }
+ else {
+            int l_rem = i+l;
+ int r_rem = j-(k-l);
+ if ((l_rem < src_tree.width() && r_rem < src_tree.height()) && TD::Convert(src_tree(l_rem, r_rem)).compare("XX") != 0) {
+ ok = 1;
+ min_dist_label = (TD::Convert(src_tree(l_rem, r_rem)));
+ break;
+ }
+ }
+ }
+ if (ok) break;
+ }
+ }
+ //cerr << "SPAN: " << i << " " << j << endl;
+ //cerr << "MINDIST: " << min_dist << endl;
+ //cerr << "MINDISTLABEL: " << min_dist_label << endl;
+ for (unsigned int i = 0; i < feat_labels.size(); i++) {
+ ostringstream os;
+ string label = feat_labels.at(i).first;
+ //cerr << "This Label: " << label << endl;
+ char feat_type = (char) feat_labels.at(i).second.c_str()[0];
+ //cerr << "feat_type: " << feat_type << endl;
+ switch(feat_type) {
+ case '2':
+ if (min_dist_label.compare(label) == 0) {
+ if (min_dist == 0) {
+ os << "SYN:" << label << "_conform";
+ }
+ else {
+ os << "SYN:" << label << "_cross";
+ }
+ fid_ef = FD::Convert(os.str());
+ //cerr << "Feature :" << os.str() << endl;
+ feats->set_value(fid_ef, 1.0);
+ }
+ break;
+ case '_':
+ os << "SYN:" << label;
+ fid_ef = FD::Convert(os.str());
+ if (min_dist_label.compare(label) == 0) {
+ //cerr << "Feature: " << os.str() << endl;
+ if (min_dist == 0) {
+ feats->set_value(fid_ef, 1.0);
+ }
+ else {
+ //cerr << "Feature: " << os.str() << endl;
+ feats->set_value(fid_ef, -1.0);
+ }
+ }
+ break;
+ case '+':
+ if (min_dist_label.compare(label) == 0) {
+ os << "SYN:" << label << "_conform";
+ fid_ef = FD::Convert(os.str());
+ if (min_dist == 0) {
+ //cerr << "Feature: " << os.str() << endl;
+ feats->set_value(fid_ef, 1.0);
+ }
+ }
+ break;
+ case '-':
+ //cerr << "-" << endl;
+ if (min_dist_label.compare(label) != 0) {
+ os << "SYN:" << label << "_cross";
+ fid_ef = FD::Convert(os.str());
+ if (min_dist > 0) {
+ //cerr << "Feature :" << os.str() << endl;
+ feats->set_value(fid_ef, 1.0);
+ }
+ }
+ break;
+ }
+ //cerr << "FEATURE: " << os.str() << endl;
+ //cerr << endl;
+ }
+ return lhs;
+ }
+
+ Array2D<WordID> src_tree; // src_tree(i,j) NT = type
+ mutable Array2D<map<const TRule*, int> > fids_ef; // fires for fully lexicalized
+ vector<pair<string, string> > feat_labels;
+};
+
+SoftSyntacticFeatures2::SoftSyntacticFeatures2(const string& param) :
+ FeatureFunction(sizeof(WordID)) {
+ impl = new SoftSyntacticFeatures2Impl(param);
+}
+
+SoftSyntacticFeatures2::~SoftSyntacticFeatures2() {
+ delete impl;
+ impl = NULL;
+}
+
+void SoftSyntacticFeatures2::TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const {
+ WordID ants[8];
+ for (unsigned i = 0; i < ant_contexts.size(); ++i)
+ ants[i] = *static_cast<const WordID*>(ant_contexts[i]);
+
+ *static_cast<WordID*>(context) =
+ impl->FireFeatures(*edge.rule_, edge.i_, edge.j_, ants, features);
+}
+
+void SoftSyntacticFeatures2::PrepareForInput(const SentenceMetadata& smeta) {
+ impl->InitializeGrids(smeta.GetSGMLValue("src_tree"), smeta.GetSourceLength());
+}
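For reference, a hypothetical way to enable this feature: the feature_function=&lt;Name&gt; &lt;params&gt; line mirrors the cdec.ini files elsewhere in this commit, and PrepareForInput reads the source parse from the src_tree SGML attribute of each input segment. The labels and the segment below are invented for illustration:

```
feature_function=SoftSyntacticFeatures2 NP+ VP- S_
```

with input segments of the form

```
<seg id="0" src_tree="(S (NP John) (VP (V left)))">John left</seg>
```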
diff --git a/decoder/ff_soft_syntax2.h b/decoder/ff_soft_syntax2.h
new file mode 100644
index 00000000..4de91d86
--- /dev/null
+++ b/decoder/ff_soft_syntax2.h
@@ -0,0 +1,27 @@
+#ifndef _FF_SOFTSYNTAX2_H_
+#define _FF_SOFTSYNTAX2_H_
+
+#include "ff.h"
+#include "hg.h"
+
+struct SoftSyntacticFeatures2Impl;
+
+class SoftSyntacticFeatures2 : public FeatureFunction {
+ public:
+ SoftSyntacticFeatures2(const std::string& param);
+ ~SoftSyntacticFeatures2();
+ protected:
+ virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const std::vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const;
+ virtual void PrepareForInput(const SentenceMetadata& smeta);
+ private:
+ SoftSyntacticFeatures2Impl* impl;
+};
+
+#endif
diff --git a/decoder/ff_source_syntax2.cc b/decoder/ff_source_syntax2.cc
new file mode 100644
index 00000000..08ece917
--- /dev/null
+++ b/decoder/ff_source_syntax2.cc
@@ -0,0 +1,159 @@
+#include "ff_source_syntax2.h"
+
+#include <sstream>
+#include <stack>
+#include <string>
+#include <tr1/unordered_set>
+
+#include "sentence_metadata.h"
+#include "array2d.h"
+#include "filelib.h"
+
+using namespace std;
+
+// implements the source side syntax features described in Blunsom et al. (EMNLP 2008)
+// source trees must be represented in Penn Treebank format, e.g.
+// (S (NP John) (VP (V left)))
+
+struct SourceSyntaxFeatures2Impl {
+ SourceSyntaxFeatures2Impl(const string& param) {
+    if (!param.empty()) {
+ string triggered_features_fn = param;
+ ReadFile triggered_features(triggered_features_fn);
+ string in;
+ while(getline(*triggered_features, in)) {
+ feature_filter.insert(FD::Convert(in));
+ }
+ }
+ }
+
+ void InitializeGrids(const string& tree, unsigned src_len) {
+ assert(tree.size() > 0);
+ //fids_cat.clear();
+ fids_ef.clear();
+ src_tree.clear();
+ //fids_cat.resize(src_len, src_len + 1);
+ fids_ef.resize(src_len, src_len + 1);
+ src_tree.resize(src_len, src_len + 1, TD::Convert("XX"));
+ ParseTreeString(tree, src_len);
+ }
+
+ void ParseTreeString(const string& tree, unsigned src_len) {
+ //cerr << "TREE: " << tree << endl;
+ stack<pair<int, WordID> > stk; // first = i, second = category
+ pair<int, WordID> cur_cat; cur_cat.first = -1;
+ unsigned i = 0;
+ unsigned p = 0;
+ while(p < tree.size()) {
+ const char cur = tree[p];
+ if (cur == '(') {
+ stk.push(cur_cat);
+ ++p;
+ unsigned k = p + 1;
+ while (k < tree.size() && tree[k] != ' ') { ++k; }
+ cur_cat.first = i;
+ cur_cat.second = TD::Convert(tree.substr(p, k - p));
+ // cerr << "NT: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ p = k + 1;
+ } else if (cur == ')') {
+ unsigned k = p;
+ while (k < tree.size() && tree[k] == ')') { ++k; }
+ const unsigned num_closes = k - p;
+ for (unsigned ci = 0; ci < num_closes; ++ci) {
+ src_tree(cur_cat.first, i) = cur_cat.second;
+ cur_cat = stk.top();
+ stk.pop();
+ }
+ p = k;
+ while (p < tree.size() && (tree[p] == ' ' || tree[p] == '\t')) { ++p; }
+ } else if (cur == ' ' || cur == '\t') {
+ cerr << "Unexpected whitespace in: " << tree << endl;
+ abort();
+ } else { // terminal symbol
+ unsigned k = p + 1;
+ do {
+ while (k < tree.size() && tree[k] != ')' && tree[k] != ' ') { ++k; }
+ // cerr << "TERM: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ ++i;
+ assert(i <= src_len);
+ while (k < tree.size() && tree[k] == ' ') { ++k; }
+ p = k;
+ } while (p < tree.size() && tree[p] != ')');
+ }
+ //cerr << "i=" << i << " src_len=" << src_len << endl;
+ }
+ //cerr << "i=" << i << " src_len=" << src_len << endl;
+ assert(i == src_len); // make sure tree specified in src_tree is
+ // the same length as the source sentence
+ }
+
+ WordID FireFeatures(const TRule& rule, const int i, const int j, const WordID* ants, SparseVector<double>* feats) {
+ //cerr << "fire features: " << rule.AsString() << " for " << i << "," << j << endl;
+ const WordID lhs = src_tree(i,j);
+ int& fid_ef = fids_ef(i,j)[&rule];
+ ostringstream os;
+ os << "SYN:" << TD::Convert(lhs);
+ os << ':';
+ unsigned ntc = 0;
+ for (unsigned k = 0; k < rule.f_.size(); ++k) {
+ int fj = rule.f_[k];
+ if (k > 0 && fj <= 0) os << '_';
+ if (fj <= 0) {
+ os << '[' << TD::Convert(ants[ntc++]) << ']';
+ } /*else {
+ os << TD::Convert(fj);
+ }*/
+ }
+ os << ':';
+ for (unsigned k = 0; k < rule.e_.size(); ++k) {
+ const int ei = rule.e_[k];
+ if (k > 0) os << '_';
+ if (ei <= 0)
+ os << '[' << (1-ei) << ']';
+ else
+ os << TD::Convert(ei);
+ }
+ fid_ef = FD::Convert(os.str());
+ //cerr << "FEATURE: " << os.str() << endl;
+ //cerr << "FID_EF: " << fid_ef << endl;
+    if (feature_filter.find(fid_ef) != feature_filter.end()) {
+      cerr << "SYN-Feature was triggered more than once on training set." << endl;
+      feats->set_value(fid_ef, 1.0);
+    }
+    else cerr << "SYN-Feature was not triggered on training set." << endl;
+ return lhs;
+ }
+
+ Array2D<WordID> src_tree; // src_tree(i,j) NT = type
+ mutable Array2D<map<const TRule*, int> > fids_ef; // fires for fully lexicalized
+ tr1::unordered_set<int> feature_filter;
+
+};
+
+SourceSyntaxFeatures2::SourceSyntaxFeatures2(const string& param) :
+ FeatureFunction(sizeof(WordID)) {
+ impl = new SourceSyntaxFeatures2Impl(param);
+}
+
+SourceSyntaxFeatures2::~SourceSyntaxFeatures2() {
+ delete impl;
+ impl = NULL;
+}
+
+void SourceSyntaxFeatures2::TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const {
+ WordID ants[8];
+ for (unsigned i = 0; i < ant_contexts.size(); ++i)
+ ants[i] = *static_cast<const WordID*>(ant_contexts[i]);
+
+ *static_cast<WordID*>(context) =
+ impl->FireFeatures(*edge.rule_, edge.i_, edge.j_, ants, features);
+}
+
+void SourceSyntaxFeatures2::PrepareForInput(const SentenceMetadata& smeta) {
+ impl->InitializeGrids(smeta.GetSGMLValue("src_tree"), smeta.GetSourceLength());
+}
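The feature strings assembled above take the shape SYN:&lt;span label&gt;:&lt;antecedent labels&gt;:&lt;target side&gt;; source-side terminals are skipped (the commented-out branch), antecedent nonterminals appear bracketed with the label of the span they cover, and target nonterminals appear as 1-based indices. A standalone sketch with invented labels (not part of the patch):

```
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
  // Hypothetical rule spanning a VP, with two NT antecedents labeled NP and PP
  // and target side "yesterday [1] [2]".
  std::string lhs = "VP";
  std::vector<std::string> ant_labels = {"NP", "PP"};
  std::vector<std::string> target = {"yesterday", "[1]", "[2]"};
  std::ostringstream os;
  os << "SYN:" << lhs << ':';
  for (size_t k = 0; k < ant_labels.size(); ++k) {
    if (k > 0) os << '_';
    os << '[' << ant_labels[k] << ']';
  }
  os << ':';
  for (size_t k = 0; k < target.size(); ++k) {
    if (k > 0) os << '_';
    os << target[k];
  }
  std::cout << os.str() << "\n";  // SYN:VP:[NP]_[PP]:yesterday_[1]_[2]
}
```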
diff --git a/decoder/ff_source_syntax2.h b/decoder/ff_source_syntax2.h
new file mode 100644
index 00000000..b6b7dc3d
--- /dev/null
+++ b/decoder/ff_source_syntax2.h
@@ -0,0 +1,25 @@
+#ifndef _FF_SOURCE_TOOLS2_H_
+#define _FF_SOURCE_TOOLS2_H_
+
+#include "ff.h"
+#include "hg.h"
+
+struct SourceSyntaxFeatures2Impl;
+
+class SourceSyntaxFeatures2 : public FeatureFunction {
+ public:
+ SourceSyntaxFeatures2(const std::string& param);
+ ~SourceSyntaxFeatures2();
+ protected:
+ virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const std::vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const;
+ virtual void PrepareForInput(const SentenceMetadata& smeta);
+ private:
+ SourceSyntaxFeatures2Impl* impl;
+};
+
+#endif
diff --git a/decoder/ff_source_syntax2_p.cc b/decoder/ff_source_syntax2_p.cc
new file mode 100644
index 00000000..dfa791ea
--- /dev/null
+++ b/decoder/ff_source_syntax2_p.cc
@@ -0,0 +1,166 @@
+#include "ff_source_syntax2_p.h"
+
+#include <sstream>
+#include <stack>
+#include <string>
+#include <tr1/unordered_set>
+
+#include "sentence_metadata.h"
+#include "array2d.h"
+#include "filelib.h"
+
+using namespace std;
+
+// implements the source side syntax features described in Blunsom et al. (EMNLP 2008)
+// source trees must be represented in Penn Treebank format, e.g.
+// (S (NP John) (VP (V left)))
+
+struct PSourceSyntaxFeatures2Impl {
+ PSourceSyntaxFeatures2Impl(const string& param) {
+ if (param.compare("") != 0) {
+ string triggered_features_fn = param;
+ ReadFile triggered_features(triggered_features_fn);
+ string in;
+ while(getline(*triggered_features, in)) {
+ feature_filter.insert(FD::Convert(in));
+ }
+ }
+ /*cerr << "find(\"One\") == " << boolalpha << (table.find("One") != table.end()) << endl;
+ cerr << "find(\"Three\") == " << boolalpha << (table.find("Three") != table.end()) << endl;*/
+ }
+
+ void InitializeGrids(const string& tree, unsigned src_len) {
+ assert(tree.size() > 0);
+ //fids_cat.clear();
+ fids_ef.clear();
+ src_tree.clear();
+ //fids_cat.resize(src_len, src_len + 1);
+ fids_ef.resize(src_len, src_len + 1);
+ src_tree.resize(src_len, src_len + 1, TD::Convert("XX"));
+ ParseTreeString(tree, src_len);
+ }
+
+ void ParseTreeString(const string& tree, unsigned src_len) {
+ //cerr << "TREE: " << tree << endl;
+ stack<pair<int, WordID> > stk; // first = i, second = category
+ pair<int, WordID> cur_cat; cur_cat.first = -1;
+ unsigned i = 0;
+ unsigned p = 0;
+ while(p < tree.size()) {
+ const char cur = tree[p];
+ if (cur == '(') {
+ stk.push(cur_cat);
+ ++p;
+ unsigned k = p + 1;
+ while (k < tree.size() && tree[k] != ' ') { ++k; }
+ cur_cat.first = i;
+ cur_cat.second = TD::Convert(tree.substr(p, k - p));
+ // cerr << "NT: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ p = k + 1;
+ } else if (cur == ')') {
+ unsigned k = p;
+ while (k < tree.size() && tree[k] == ')') { ++k; }
+ const unsigned num_closes = k - p;
+ for (unsigned ci = 0; ci < num_closes; ++ci) {
+ src_tree(cur_cat.first, i) = cur_cat.second;
+ cur_cat = stk.top();
+ stk.pop();
+ }
+ p = k;
+ while (p < tree.size() && (tree[p] == ' ' || tree[p] == '\t')) { ++p; }
+ } else if (cur == ' ' || cur == '\t') {
+ cerr << "Unexpected whitespace in: " << tree << endl;
+ abort();
+ } else { // terminal symbol
+ unsigned k = p + 1;
+ do {
+ while (k < tree.size() && tree[k] != ')' && tree[k] != ' ') { ++k; }
+ // cerr << "TERM: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ ++i;
+ assert(i <= src_len);
+ while (k < tree.size() && tree[k] == ' ') { ++k; }
+ p = k;
+ } while (p < tree.size() && tree[p] != ')');
+ }
+ //cerr << "i=" << i << " src_len=" << src_len << endl;
+ }
+ //cerr << "i=" << i << " src_len=" << src_len << endl;
+ assert(i == src_len); // make sure tree specified in src_tree is
+ // the same length as the source sentence
+ }
+
+ WordID FireFeatures(const TRule& rule, const int i, const int j, const WordID* ants, SparseVector<double>* feats) {
+ //cerr << "fire features: " << rule.AsString() << " for " << i << "," << j << endl;
+ const WordID lhs = src_tree(i,j);
+ int& fid_ef = fids_ef(i,j)[&rule];
+ ostringstream os;
+ os << "SYN:" << TD::Convert(lhs);
+ os << ':';
+ unsigned ntc = 0;
+ for (unsigned k = 0; k < rule.f_.size(); ++k) {
+ int fj = rule.f_[k];
+ if (k > 0 && fj <= 0) os << '_';
+ if (fj <= 0) {
+ os << '[' << TD::Convert(ants[ntc++]) << ']';
+ } /*else {
+ os << TD::Convert(fj);
+ }*/
+ }
+ os << ':';
+ for (unsigned k = 0; k < rule.e_.size(); ++k) {
+ const int ei = rule.e_[k];
+ if (k > 0) os << '_';
+ if (ei <= 0)
+ os << '[' << (1-ei) << ']';
+ else
+ os << TD::Convert(ei);
+ }
+ fid_ef = FD::Convert(os.str());
+ //cerr << "FEATURE: " << os.str() << endl;
+ //cerr << "FID_EF: " << fid_ef << endl;
+ if (feature_filter.size() > 0) {
+ if (feature_filter.find(fid_ef) != feature_filter.end()) {
+      //cerr << "SYN-Feature was triggered more than once on training set." << endl;
+ feats->set_value(fid_ef, 1.0);
+ }
+      //else cerr << "SYN-Feature was not triggered on training set." << endl;
+ }
+ else {
+ feats->set_value(fid_ef, 1.0);
+ }
+ return lhs;
+ }
+
+ Array2D<WordID> src_tree; // src_tree(i,j) NT = type
+ mutable Array2D<map<const TRule*, int> > fids_ef; // fires for fully lexicalized
+ tr1::unordered_set<int> feature_filter;
+
+};
+
+PSourceSyntaxFeatures2::PSourceSyntaxFeatures2(const string& param) :
+ FeatureFunction(sizeof(WordID)) {
+ impl = new PSourceSyntaxFeatures2Impl(param);
+}
+
+PSourceSyntaxFeatures2::~PSourceSyntaxFeatures2() {
+ delete impl;
+ impl = NULL;
+}
+
+void PSourceSyntaxFeatures2::TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const {
+ WordID ants[8];
+ for (unsigned i = 0; i < ant_contexts.size(); ++i)
+ ants[i] = *static_cast<const WordID*>(ant_contexts[i]);
+
+ *static_cast<WordID*>(context) =
+ impl->FireFeatures(*edge.rule_, edge.i_, edge.j_, ants, features);
+}
+
+void PSourceSyntaxFeatures2::PrepareForInput(const SentenceMetadata& smeta) {
+ impl->InitializeGrids(smeta.GetSGMLValue("src_tree"), smeta.GetSourceLength());
+}
diff --git a/decoder/ff_source_syntax2_p.h b/decoder/ff_source_syntax2_p.h
new file mode 100644
index 00000000..d56ecab0
--- /dev/null
+++ b/decoder/ff_source_syntax2_p.h
@@ -0,0 +1,25 @@
+#ifndef _FF_SOURCE_TOOLS2_P_H_
+#define _FF_SOURCE_TOOLS2_P_H_
+
+#include "ff.h"
+#include "hg.h"
+
+struct PSourceSyntaxFeatures2Impl;
+
+class PSourceSyntaxFeatures2 : public FeatureFunction {
+ public:
+ PSourceSyntaxFeatures2(const std::string& param);
+ ~PSourceSyntaxFeatures2();
+ protected:
+ virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const std::vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const;
+ virtual void PrepareForInput(const SentenceMetadata& smeta);
+ private:
+ PSourceSyntaxFeatures2Impl* impl;
+};
+
+#endif
diff --git a/decoder/ff_source_syntax_p.cc b/decoder/ff_source_syntax_p.cc
new file mode 100644
index 00000000..cd081544
--- /dev/null
+++ b/decoder/ff_source_syntax_p.cc
@@ -0,0 +1,245 @@
+#include "ff_source_syntax_p.h"
+
+#include <sstream>
+#include <stack>
+#include <tr1/unordered_set>
+
+#include "sentence_metadata.h"
+#include "array2d.h"
+#include "filelib.h"
+
+using namespace std;
+
+// implements the source side syntax features described in Blunsom et al. (EMNLP 2008)
+// source trees must be represented in Penn Treebank format, e.g.
+// (S (NP John) (VP (V left)))
+
+// log transform to make long spans cluster together
+// but preserve differences
+inline int SpanSizeTransform(unsigned span_size) {
+ if (!span_size) return 0;
+ return static_cast<int>(log(span_size+1) / log(1.39)) - 1;
+}
+
+struct PSourceSyntaxFeaturesImpl {
+ PSourceSyntaxFeaturesImpl() {}
+
+ PSourceSyntaxFeaturesImpl(const string& param) {
+    if (!param.empty()) {
+ string triggered_features_fn = param;
+ ReadFile triggered_features(triggered_features_fn);
+ string in;
+ while(getline(*triggered_features, in)) {
+ feature_filter.insert(FD::Convert(in));
+ }
+ }
+ }
+
+ void InitializeGrids(const string& tree, unsigned src_len) {
+ assert(tree.size() > 0);
+ //fids_cat.clear();
+ fids_ef.clear();
+ src_tree.clear();
+ //fids_cat.resize(src_len, src_len + 1);
+ fids_ef.resize(src_len, src_len + 1);
+ src_tree.resize(src_len, src_len + 1, TD::Convert("XX"));
+ ParseTreeString(tree, src_len);
+ }
+
+ void ParseTreeString(const string& tree, unsigned src_len) {
+ stack<pair<int, WordID> > stk; // first = i, second = category
+ pair<int, WordID> cur_cat; cur_cat.first = -1;
+ unsigned i = 0;
+ unsigned p = 0;
+ while(p < tree.size()) {
+ const char cur = tree[p];
+ if (cur == '(') {
+ stk.push(cur_cat);
+ ++p;
+ unsigned k = p + 1;
+ while (k < tree.size() && tree[k] != ' ') { ++k; }
+ cur_cat.first = i;
+ cur_cat.second = TD::Convert(tree.substr(p, k - p));
+ // cerr << "NT: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ p = k + 1;
+ } else if (cur == ')') {
+ unsigned k = p;
+ while (k < tree.size() && tree[k] == ')') { ++k; }
+ const unsigned num_closes = k - p;
+ for (unsigned ci = 0; ci < num_closes; ++ci) {
+ // cur_cat.second spans from cur_cat.first to i
+ // cerr << TD::Convert(cur_cat.second) << " from " << cur_cat.first << " to " << i << endl;
+ // NOTE: unary rule chains end up being labeled with the top-most category
+ src_tree(cur_cat.first, i) = cur_cat.second;
+ cur_cat = stk.top();
+ stk.pop();
+ }
+ p = k;
+ while (p < tree.size() && (tree[p] == ' ' || tree[p] == '\t')) { ++p; }
+ } else if (cur == ' ' || cur == '\t') {
+ cerr << "Unexpected whitespace in: " << tree << endl;
+ abort();
+ } else { // terminal symbol
+ unsigned k = p + 1;
+ do {
+ while (k < tree.size() && tree[k] != ')' && tree[k] != ' ') { ++k; }
+ // cerr << "TERM: '" << tree.substr(p, k-p) << "' (i=" << i << ")\n";
+ ++i;
+ assert(i <= src_len);
+ while (k < tree.size() && tree[k] == ' ') { ++k; }
+ p = k;
+ } while (p < tree.size() && tree[p] != ')');
+ }
+ }
+ // cerr << "i=" << i << " src_len=" << src_len << endl;
+ assert(i == src_len); // make sure tree specified in src_tree is
+ // the same length as the source sentence
+ }
+
+ WordID FireFeatures(const TRule& rule, const int i, const int j, const WordID* ants, SparseVector<double>* feats) {
+ //cerr << "fire features: " << rule.AsString() << " for " << i << "," << j << endl;
+ const WordID lhs = src_tree(i,j);
+ //int& fid_cat = fids_cat(i,j);
+ int& fid_ef = fids_ef(i,j)[&rule];
+ if (fid_ef <= 0) {
+ ostringstream os;
+ //ostringstream os2;
+ os << "SYN:" << TD::Convert(lhs);
+ //os2 << "SYN:" << TD::Convert(lhs) << '_' << SpanSizeTransform(j - i);
+ //fid_cat = FD::Convert(os2.str());
+ os << ':';
+ unsigned ntc = 0;
+ for (unsigned k = 0; k < rule.f_.size(); ++k) {
+ if (k > 0) os << '_';
+ int fj = rule.f_[k];
+ if (fj <= 0) {
+ os << '[' << TD::Convert(ants[ntc++]) << ']';
+ } else {
+ os << TD::Convert(fj);
+ }
+ }
+ os << ':';
+ for (unsigned k = 0; k < rule.e_.size(); ++k) {
+ const int ei = rule.e_[k];
+ if (k > 0) os << '_';
+ if (ei <= 0)
+ os << '[' << (1-ei) << ']';
+ else
+ os << TD::Convert(ei);
+ }
+ fid_ef = FD::Convert(os.str());
+ }
+ //if (fid_cat > 0)
+ // feats->set_value(fid_cat, 1.0);
+ if (fid_ef > 0 && (feature_filter.find(fid_ef) != feature_filter.end()))
+ feats->set_value(fid_ef, 1.0);
+ return lhs;
+ }
+
+ Array2D<WordID> src_tree; // src_tree(i,j) NT = type
+ // mutable Array2D<int> fids_cat; // this tends to overfit baddly
+ mutable Array2D<map<const TRule*, int> > fids_ef; // fires for fully lexicalized
+ tr1::unordered_set<int> feature_filter;
+};
+
+PSourceSyntaxFeatures::PSourceSyntaxFeatures(const string& param) :
+ FeatureFunction(sizeof(WordID)) {
+ impl = new PSourceSyntaxFeaturesImpl(param);
+}
+
+PSourceSyntaxFeatures::~PSourceSyntaxFeatures() {
+ delete impl;
+ impl = NULL;
+}
+
+void PSourceSyntaxFeatures::TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const {
+ WordID ants[8];
+ for (unsigned i = 0; i < ant_contexts.size(); ++i)
+ ants[i] = *static_cast<const WordID*>(ant_contexts[i]);
+
+ *static_cast<WordID*>(context) =
+ impl->FireFeatures(*edge.rule_, edge.i_, edge.j_, ants, features);
+}
+
+void PSourceSyntaxFeatures::PrepareForInput(const SentenceMetadata& smeta) {
+ impl->InitializeGrids(smeta.GetSGMLValue("src_tree"), smeta.GetSourceLength());
+}
+
+struct PSourceSpanSizeFeaturesImpl {
+ PSourceSpanSizeFeaturesImpl() {}
+
+ void InitializeGrids(unsigned src_len) {
+ fids.clear();
+ fids.resize(src_len, src_len + 1);
+ }
+
+ int FireFeatures(const TRule& rule, const int i, const int j, const WordID* ants, SparseVector<double>* feats) {
+ if (rule.Arity() > 0) {
+ int& fid = fids(i,j)[&rule];
+ if (fid <= 0) {
+ ostringstream os;
+ os << "SSS:";
+ unsigned ntc = 0;
+ for (unsigned k = 0; k < rule.f_.size(); ++k) {
+ if (k > 0) os << '_';
+ int fj = rule.f_[k];
+ if (fj <= 0) {
+ os << '[' << TD::Convert(-fj) << ants[ntc++] << ']';
+ } else {
+ os << TD::Convert(fj);
+ }
+ }
+ os << ':';
+ for (unsigned k = 0; k < rule.e_.size(); ++k) {
+ const int ei = rule.e_[k];
+ if (k > 0) os << '_';
+ if (ei <= 0)
+ os << '[' << (1-ei) << ']';
+ else
+ os << TD::Convert(ei);
+ }
+ fid = FD::Convert(os.str());
+ }
+ if (fid > 0)
+ feats->set_value(fid, 1.0);
+ }
+ return SpanSizeTransform(j - i);
+ }
+
+ mutable Array2D<map<const TRule*, int> > fids;
+};
+
+PSourceSpanSizeFeatures::PSourceSpanSizeFeatures(const string& param) :
+ FeatureFunction(sizeof(char)) {
+ impl = new PSourceSpanSizeFeaturesImpl;
+}
+
+PSourceSpanSizeFeatures::~PSourceSpanSizeFeatures() {
+ delete impl;
+ impl = NULL;
+}
+
+void PSourceSpanSizeFeatures::TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const {
+ int ants[8];
+ for (unsigned i = 0; i < ant_contexts.size(); ++i)
+ ants[i] = *static_cast<const char*>(ant_contexts[i]);
+
+ *static_cast<char*>(context) =
+ impl->FireFeatures(*edge.rule_, edge.i_, edge.j_, ants, features);
+}
+
+void PSourceSpanSizeFeatures::PrepareForInput(const SentenceMetadata& smeta) {
+ impl->InitializeGrids(smeta.GetSourceLength());
+}
+
+
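A quick standalone check of the SpanSizeTransform bucketing defined at the top of this file: the log base 1.39 groups long spans into shared buckets while keeping short spans distinguishable (not part of the patch):

```
#include <cmath>
#include <iostream>

// copied from ff_source_syntax_p.cc above
inline int SpanSizeTransform(unsigned span_size) {
  if (!span_size) return 0;
  return static_cast<int>(std::log(span_size + 1) / std::log(1.39)) - 1;
}

int main() {
  for (unsigned s : {1u, 2u, 3u, 5u, 8u, 13u, 21u, 34u})
    std::cout << s << " -> bucket " << SpanSizeTransform(s) << "\n";
  // 1->1, 2->2, 3->3, 5->4, 8->5, 13->7, 21->8, 34->9
}
```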
diff --git a/decoder/ff_source_syntax_p.h b/decoder/ff_source_syntax_p.h
new file mode 100644
index 00000000..2dd9094a
--- /dev/null
+++ b/decoder/ff_source_syntax_p.h
@@ -0,0 +1,42 @@
+#ifndef _FF_SOURCE_TOOLS_P_H_
+#define _FF_SOURCE_TOOLS_P_H_
+
+#include "ff.h"
+#include "hg.h"
+
+struct PSourceSyntaxFeaturesImpl;
+
+class PSourceSyntaxFeatures : public FeatureFunction {
+ public:
+ PSourceSyntaxFeatures(const std::string& param);
+ ~PSourceSyntaxFeatures();
+ protected:
+ virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const std::vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const;
+ virtual void PrepareForInput(const SentenceMetadata& smeta);
+ private:
+ PSourceSyntaxFeaturesImpl* impl;
+};
+
+struct PSourceSpanSizeFeaturesImpl;
+class PSourceSpanSizeFeatures : public FeatureFunction {
+ public:
+ PSourceSpanSizeFeatures(const std::string& param);
+ ~PSourceSpanSizeFeatures();
+ protected:
+ virtual void TraversalFeaturesImpl(const SentenceMetadata& smeta,
+ const Hypergraph::Edge& edge,
+ const std::vector<const void*>& ant_contexts,
+ SparseVector<double>* features,
+ SparseVector<double>* estimated_features,
+ void* context) const;
+ virtual void PrepareForInput(const SentenceMetadata& smeta);
+ private:
+ PSourceSpanSizeFeaturesImpl* impl;
+};
+
+#endif
diff --git a/training/dtrain/README.md b/training/dtrain/README.md
index 2ab2f232..2bae6b48 100644
--- a/training/dtrain/README.md
+++ b/training/dtrain/README.md
@@ -17,6 +17,17 @@ To build only parts needed for dtrain do
cd training/dtrain/; make
```
+Ideas
+-----
+ * get approx_bleu to work?
+ * implement minibatches (Minibatch and Parallelization for Online Large Margin Structured Learning)
+ * learning rate 1/T?
+ * use an oracle? MIRA-like (model vs. BLEU), feature repr. of reference!?
+ * implement lc_bleu properly
+ * merge k-best lists of previous epochs (as MERT does)
+ * "walk entire regularization path"
+ * rerank after each update?
+
Running
-------
See directories under test/ .
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 149f87d4..0ee2f124 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -1,4 +1,10 @@
#include "dtrain.h"
+#include "score.h"
+#include "kbestget.h"
+#include "ksampler.h"
+#include "pairsampling.h"
+
+using namespace dtrain;
bool
@@ -138,23 +144,23 @@ main(int argc, char** argv)
string scorer_str = cfg["scorer"].as<string>();
LocalScorer* scorer;
if (scorer_str == "bleu") {
- scorer = dynamic_cast<BleuScorer*>(new BleuScorer);
+ scorer = static_cast<BleuScorer*>(new BleuScorer);
} else if (scorer_str == "stupid_bleu") {
- scorer = dynamic_cast<StupidBleuScorer*>(new StupidBleuScorer);
+ scorer = static_cast<StupidBleuScorer*>(new StupidBleuScorer);
} else if (scorer_str == "fixed_stupid_bleu") {
- scorer = dynamic_cast<FixedStupidBleuScorer*>(new FixedStupidBleuScorer);
+ scorer = static_cast<FixedStupidBleuScorer*>(new FixedStupidBleuScorer);
} else if (scorer_str == "smooth_bleu") {
- scorer = dynamic_cast<SmoothBleuScorer*>(new SmoothBleuScorer);
+ scorer = static_cast<SmoothBleuScorer*>(new SmoothBleuScorer);
} else if (scorer_str == "sum_bleu") {
- scorer = dynamic_cast<SumBleuScorer*>(new SumBleuScorer);
+ scorer = static_cast<SumBleuScorer*>(new SumBleuScorer);
} else if (scorer_str == "sumexp_bleu") {
- scorer = dynamic_cast<SumExpBleuScorer*>(new SumExpBleuScorer);
+ scorer = static_cast<SumExpBleuScorer*>(new SumExpBleuScorer);
} else if (scorer_str == "sumwhatever_bleu") {
- scorer = dynamic_cast<SumWhateverBleuScorer*>(new SumWhateverBleuScorer);
+ scorer = static_cast<SumWhateverBleuScorer*>(new SumWhateverBleuScorer);
} else if (scorer_str == "approx_bleu") {
- scorer = dynamic_cast<ApproxBleuScorer*>(new ApproxBleuScorer(N, approx_bleu_d));
+ scorer = static_cast<ApproxBleuScorer*>(new ApproxBleuScorer(N, approx_bleu_d));
} else if (scorer_str == "lc_bleu") {
- scorer = dynamic_cast<LinearBleuScorer*>(new LinearBleuScorer(N));
+ scorer = static_cast<LinearBleuScorer*>(new LinearBleuScorer(N));
} else {
cerr << "Don't know scoring metric: '" << scorer_str << "', exiting." << endl;
exit(1);
@@ -166,9 +172,9 @@ main(int argc, char** argv)
MT19937 rng; // random number generator, only for forest sampling
HypSampler* observer;
if (sample_from == "kbest")
- observer = dynamic_cast<KBestGetter*>(new KBestGetter(k, filter_type));
+ observer = static_cast<KBestGetter*>(new KBestGetter(k, filter_type));
else
- observer = dynamic_cast<KSampler*>(new KSampler(k, &rng));
+ observer = static_cast<KSampler*>(new KSampler(k, &rng));
observer->SetScorer(scorer);
// init weights
@@ -360,6 +366,9 @@ main(int argc, char** argv)
PROsampling(samples, pairs, pair_threshold, max_pairs);
npairs += pairs.size();
+ SparseVector<weight_t> lambdas_copy;
+ if (l1naive||l1clip||l1cumul) lambdas_copy = lambdas;
+
for (vector<pair<ScoredHyp,ScoredHyp> >::iterator it = pairs.begin();
it != pairs.end(); it++) {
bool rank_error;
@@ -369,7 +378,7 @@ main(int argc, char** argv)
margin = std::numeric_limits<float>::max();
} else {
rank_error = it->first.model <= it->second.model;
- margin = fabs(fabs(it->first.model) - fabs(it->second.model));
+ margin = fabs(it->first.model - it->second.model);
if (!rank_error && margin < loss_margin) margin_violations++;
}
if (rank_error) rank_errors++;
@@ -383,23 +392,26 @@ main(int argc, char** argv)
}
// l1 regularization
- // please note that this penalizes _all_ weights
- // (contrary to only the ones changed by the last update)
- // after a _sentence_ (not after each example/pair)
+  // please note that this regularization happens
+ // after a _sentence_ -- not after each example/pair!
if (l1naive) {
FastSparseVector<weight_t>::iterator it = lambdas.begin();
for (; it != lambdas.end(); ++it) {
- it->second -= sign(it->second) * l1_reg;
+ if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
+ it->second -= sign(it->second) * l1_reg;
+ }
}
} else if (l1clip) {
FastSparseVector<weight_t>::iterator it = lambdas.begin();
for (; it != lambdas.end(); ++it) {
- if (it->second != 0) {
- weight_t v = it->second;
- if (v > 0) {
- it->second = max(0., v - l1_reg);
- } else {
- it->second = min(0., v + l1_reg);
+ if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
+ if (it->second != 0) {
+ weight_t v = it->second;
+ if (v > 0) {
+ it->second = max(0., v - l1_reg);
+ } else {
+ it->second = min(0., v + l1_reg);
+ }
}
}
}
@@ -407,16 +419,18 @@ main(int argc, char** argv)
weight_t acc_penalty = (ii+1) * l1_reg; // ii is the index of the current input
FastSparseVector<weight_t>::iterator it = lambdas.begin();
for (; it != lambdas.end(); ++it) {
- if (it->second != 0) {
- weight_t v = it->second;
- weight_t penalized = 0.;
- if (v > 0) {
- penalized = max(0., v-(acc_penalty + cumulative_penalties.get(it->first)));
- } else {
- penalized = min(0., v+(acc_penalty - cumulative_penalties.get(it->first)));
+ if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
+ if (it->second != 0) {
+ weight_t v = it->second;
+ weight_t penalized = 0.;
+ if (v > 0) {
+ penalized = max(0., v-(acc_penalty + cumulative_penalties.get(it->first)));
+ } else {
+ penalized = min(0., v+(acc_penalty - cumulative_penalties.get(it->first)));
+ }
+ it->second = penalized;
+ cumulative_penalties.set_value(it->first, cumulative_penalties.get(it->first)+penalized);
}
- it->second = penalized;
- cumulative_penalties.set_value(it->first, cumulative_penalties.get(it->first)+penalized);
}
}
}
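For orientation: the guard added above restricts regularization to weights touched by the last update; within it, the l1clip branch moves each weight toward zero by l1_reg without letting it cross zero, and l1cumul additionally tracks cumulative penalties (in the style of Tsuruoka et al. 2009). A standalone sketch of the clipping step alone, with invented values (not part of the patch):

```
#include <algorithm>
#include <iostream>

int main() {
  const double l1_reg = 0.1;
  double weights[] = {0.35, -0.02, 0.0, -0.4};  // hypothetical lambdas
  for (double& w : weights) {
    if (w > 0)      w = std::max(0.0, w - l1_reg);  // shrink, clip at zero
    else if (w < 0) w = std::min(0.0, w + l1_reg);
    std::cout << w << " ";
  }
  std::cout << "\n";  // 0.25 0 0 -0.3
}
```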
diff --git a/training/dtrain/dtrain.h b/training/dtrain/dtrain.h
index eb0b9f17..3981fb39 100644
--- a/training/dtrain/dtrain.h
+++ b/training/dtrain/dtrain.h
@@ -11,16 +11,19 @@
#include <boost/algorithm/string.hpp>
#include <boost/program_options.hpp>
-#include "ksampler.h"
-#include "pairsampling.h"
-
-#include "filelib.h"
-
+#include "decoder.h"
+#include "ff_register.h"
+#include "sentence_metadata.h"
+#include "verbose.h"
+#include "viterbi.h"
using namespace std;
-using namespace dtrain;
namespace po = boost::program_options;
+namespace dtrain
+{
+
inline void register_and_convert(const vector<string>& strs, vector<WordID>& ids)
{
vector<string>::const_iterator it;
@@ -42,17 +45,55 @@ inline string gettmpf(const string path, const string infix)
return string(fn);
}
-inline void split_in(string& s, vector<string>& parts)
+typedef double score_t;
+
+struct ScoredHyp
{
- unsigned f = 0;
- for(unsigned i = 0; i < 3; i++) {
- unsigned e = f;
- f = s.find("\t", f+1);
- if (e != 0) parts.push_back(s.substr(e+1, f-e-1));
- else parts.push_back(s.substr(0, f));
+ vector<WordID> w;
+ SparseVector<double> f;
+ score_t model;
+ score_t score;
+ unsigned rank;
+};
+
+struct LocalScorer
+{
+ unsigned N_;
+ vector<score_t> w_;
+
+ virtual score_t
+ Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned src_len)=0;
+
+ virtual void Reset() {} // only for ApproxBleuScorer, LinearBleuScorer
+
+ inline void
+ Init(unsigned N, vector<score_t> weights)
+ {
+ assert(N > 0);
+ N_ = N;
+ if (weights.empty()) for (unsigned i = 0; i < N_; i++) w_.push_back(1./N_);
+ else w_ = weights;
}
- s.erase(0, f+1);
-}
+
+ inline score_t
+ brevity_penalty(const unsigned hyp_len, const unsigned ref_len)
+ {
+ if (hyp_len > ref_len) return 1;
+ return exp(1 - (score_t)ref_len/hyp_len);
+ }
+};
+
+struct HypSampler : public DecoderObserver
+{
+ LocalScorer* scorer_;
+ vector<WordID>* ref_;
+ unsigned f_count_, sz_;
+ virtual vector<ScoredHyp>* GetSamples()=0;
+ inline void SetScorer(LocalScorer* scorer) { scorer_ = scorer; }
+ inline void SetRef(vector<WordID>& ref) { ref_ = &ref; }
+ inline unsigned get_f_count() { return f_count_; }
+ inline unsigned get_sz() { return sz_; }
+};
struct HSReporter
{
@@ -88,5 +129,8 @@ inline T sign(T z)
return z < 0 ? -1 : +1;
}
+
+} // namespace
+
#endif
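A standalone mirror of brevity_penalty in LocalScorer above, for a quick sanity check: it returns 1 whenever the hypothesis is longer than the reference (and also at equal lengths, since exp(0) = 1), and exp(1 - ref_len/hyp_len) otherwise (not part of the patch):

```
#include <cmath>
#include <iostream>

// mirrors LocalScorer::brevity_penalty from dtrain.h above
double brevity_penalty(unsigned hyp_len, unsigned ref_len) {
  if (hyp_len > ref_len) return 1;
  return std::exp(1 - (double)ref_len / hyp_len);
}

int main() {
  std::cout << brevity_penalty(9, 10) << "\n";   // exp(1 - 10/9) ~ 0.8948
  std::cout << brevity_penalty(10, 10) << "\n";  // 1
  std::cout << brevity_penalty(12, 10) << "\n";  // 1
}
```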
diff --git a/training/dtrain/examples/parallelized/cdec.ini b/training/dtrain/examples/parallelized/cdec.ini
index e43ba1c4..5773029a 100644
--- a/training/dtrain/examples/parallelized/cdec.ini
+++ b/training/dtrain/examples/parallelized/cdec.ini
@@ -4,7 +4,7 @@ intersection_strategy=cube_pruning
cubepruning_pop_limit=200
scfg_max_span_limit=15
feature_function=WordPenalty
-feature_function=KLanguageModel ../example/nc-wmt11.en.srilm.gz
+feature_function=KLanguageModel ../standard/nc-wmt11.en.srilm.gz
#feature_function=ArityPenalty
#feature_function=CMR2008ReorderingFeatures
#feature_function=Dwarf
diff --git a/training/dtrain/examples/parallelized/dtrain.ini b/training/dtrain/examples/parallelized/dtrain.ini
index f19ef891..0b0932d6 100644
--- a/training/dtrain/examples/parallelized/dtrain.ini
+++ b/training/dtrain/examples/parallelized/dtrain.ini
@@ -11,6 +11,4 @@ pair_sampling=XYX
hi_lo=0.1
select_weights=last
print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
-# newer version of the grammar extractor use different feature names:
-#print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
decoder_config=cdec.ini
diff --git a/training/dtrain/examples/parallelized/work/out.0.0 b/training/dtrain/examples/parallelized/work/out.0.0
index 7a00ed0f..c559dd4d 100644
--- a/training/dtrain/examples/parallelized/work/out.0.0
+++ b/training/dtrain/examples/parallelized/work/out.0.0
@@ -1,9 +1,9 @@
cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 3121929377
+Seeding random number sequence to 405292278
dtrain
Parameters:
@@ -16,6 +16,7 @@ Parameters:
learning rate 0.0001
gamma 0
loss margin 1
+ faster perceptron 0
pairs 'XYX'
hi lo 0.1
pair threshold 0
@@ -51,11 +52,11 @@ WEIGHTS
non0 feature count: 12
avg list sz: 100
avg f count: 11.32
-(time 0.37 min, 4.4 s/S)
+(time 0.35 min, 4.2 s/S)
Writing weights file to 'work/weights.0.0' ...
done
---
Best iteration: 1 [SCORE 'stupid_bleu'=0.17521].
-This took 0.36667 min.
+This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.0.1 b/training/dtrain/examples/parallelized/work/out.0.1
index e2bd6649..8bc7ea9c 100644
--- a/training/dtrain/examples/parallelized/work/out.0.1
+++ b/training/dtrain/examples/parallelized/work/out.0.1
@@ -1,9 +1,9 @@
cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 2767202922
+Seeding random number sequence to 43859692
dtrain
Parameters:
@@ -16,6 +16,7 @@ Parameters:
learning rate 0.0001
gamma 0
loss margin 1
+ faster perceptron 0
pairs 'XYX'
hi lo 0.1
pair threshold 0
@@ -52,11 +53,11 @@ WEIGHTS
non0 feature count: 12
avg list sz: 100
avg f count: 10.496
-(time 0.32 min, 3.8 s/S)
+(time 0.35 min, 4.2 s/S)
Writing weights file to 'work/weights.0.1' ...
done
---
Best iteration: 1 [SCORE 'stupid_bleu'=0.26638].
-This took 0.31667 min.
+This took 0.35 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.0 b/training/dtrain/examples/parallelized/work/out.1.0
index 6e790e38..65d1e7dc 100644
--- a/training/dtrain/examples/parallelized/work/out.1.0
+++ b/training/dtrain/examples/parallelized/work/out.1.0
@@ -1,9 +1,9 @@
cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 1432415010
+Seeding random number sequence to 4126799437
dtrain
Parameters:
@@ -16,6 +16,7 @@ Parameters:
learning rate 0.0001
gamma 0
loss margin 1
+ faster perceptron 0
pairs 'XYX'
hi lo 0.1
pair threshold 0
@@ -51,11 +52,11 @@ WEIGHTS
non0 feature count: 11
avg list sz: 100
avg f count: 11.814
-(time 0.45 min, 5.4 s/S)
+(time 0.43 min, 5.2 s/S)
Writing weights file to 'work/weights.1.0' ...
done
---
Best iteration: 1 [SCORE 'stupid_bleu'=0.10863].
-This took 0.45 min.
+This took 0.43333 min.
diff --git a/training/dtrain/examples/parallelized/work/out.1.1 b/training/dtrain/examples/parallelized/work/out.1.1
index 0b984761..f479fbbc 100644
--- a/training/dtrain/examples/parallelized/work/out.1.1
+++ b/training/dtrain/examples/parallelized/work/out.1.1
@@ -1,9 +1,9 @@
cdec cfg 'cdec.ini'
Loading the LM will be faster if you build a binary file.
-Reading ../example/nc-wmt11.en.srilm.gz
+Reading ../standard/nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
-Seeding random number sequence to 1771918374
+Seeding random number sequence to 2112412848
dtrain
Parameters:
@@ -16,6 +16,7 @@ Parameters:
learning rate 0.0001
gamma 0
loss margin 1
+ faster perceptron 0
pairs 'XYX'
hi lo 0.1
pair threshold 0
@@ -52,11 +53,11 @@ WEIGHTS
non0 feature count: 12
avg list sz: 100
avg f count: 11.224
-(time 0.42 min, 5 s/S)
+(time 0.45 min, 5.4 s/S)
Writing weights file to 'work/weights.1.1' ...
done
---
Best iteration: 1 [SCORE 'stupid_bleu'=0.13169].
-This took 0.41667 min.
+This took 0.45 min.
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index e1072d30..23e94285 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -10,15 +10,15 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
stop_after=10 # stop epoch after 10 inputs
# interesting stuff
-epochs=2 # run over input 2 times
-k=100 # use 100best lists
-N=4 # optimize (approx) BLEU4
-scorer=stupid_bleu # use 'stupid' BLEU+1
-learning_rate=1.0 # learning rate, don't care if gamma=0 (perceptron)
-gamma=0 # use SVM reg
-sample_from=kbest # use kbest lists (as opposed to forest)
-filter=uniq # only unique entries in kbest (surface form)
-pair_sampling=XYX #
-hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
-pair_threshold=0 # minimum distance in BLEU (here: > 0)
-loss_margin=0 # update if correctly ranked, but within this margin
+epochs=2 # run over input 2 times
+k=100 # use 100best lists
+N=4 # optimize (approx) BLEU4
+scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
+learning_rate=1.0 # learning rate, don't care if gamma=0 (perceptron)
+gamma=0 # use SVM reg
+sample_from=kbest # use kbest lists (as opposed to forest)
+filter=uniq # only unique entries in kbest (surface form)
+pair_sampling=XYX #
+hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
+pair_threshold=0 # minimum distance in BLEU (here: > 0)
+loss_margin=0 # update if correctly ranked, but within this margin
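
For orientation, a hedged sketch of how pair_threshold and loss_margin from this config interact in the perceptron case (gamma=0); this is distilled from the settings above, not a verbatim excerpt of dtrain.cc:

    // One pairwise update; hi, lo are ScoredHyp with hi.score >= lo.score.
    inline void pair_update(ScoredHyp& hi, ScoredHyp& lo,
                            SparseVector<double>& lambdas,
                            double pair_threshold, double loss_margin,
                            double learning_rate)
    {
      if (hi.score - lo.score <= pair_threshold) return;    // below min BLEU distance
      bool rank_err    = hi.model <= lo.model;              // model misranks the pair
      bool margin_viol = hi.model - lo.model < loss_margin; // ranked, but too close
      if (rank_err || margin_viol)                          // perceptron-style step
        lambdas.plus_eq_v_times_s(hi.f - lo.f, learning_rate);
    }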
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index 7cd09dbf..21f91244 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,14 +4,14 @@ Reading ./nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
Example feature: Shape_S00000_T00000
-Seeding random number sequence to 2679584485
+Seeding random number sequence to 970626287
dtrain
Parameters:
k 100
N 4
T 2
- scorer 'stupid_bleu'
+ scorer 'fixed_stupid_bleu'
sample from 'kbest'
filter 'uniq'
learning rate 1
@@ -34,58 +34,58 @@ Iteration #1 of 2.
. 10
Stopping after 10 input sentences.
WEIGHTS
- Glue = -576
- WordPenalty = +417.79
- LanguageModel = +5117.5
- LanguageModel_OOV = -1307
- PhraseModel_0 = -1612
- PhraseModel_1 = -2159.6
- PhraseModel_2 = -677.36
- PhraseModel_3 = +2663.8
- PhraseModel_4 = -1025.9
- PhraseModel_5 = -8
- PhraseModel_6 = +70
- PassThrough = -1455
+ Glue = -614
+ WordPenalty = +1256.8
+ LanguageModel = +5610.5
+ LanguageModel_OOV = -1449
+ PhraseModel_0 = -2107
+ PhraseModel_1 = -4666.1
+ PhraseModel_2 = -2713.5
+ PhraseModel_3 = +4204.3
+ PhraseModel_4 = -1435.8
+ PhraseModel_5 = +916
+ PhraseModel_6 = +190
+ PassThrough = -2527
---
- 1best avg score: 0.27697 (+0.27697)
- 1best avg model score: -47918 (-47918)
- avg # pairs: 581.9 (meaningless)
- avg # rank err: 581.9
+ 1best avg score: 0.17874 (+0.17874)
+ 1best avg model score: 88399 (+88399)
+ avg # pairs: 798.2 (meaningless)
+ avg # rank err: 798.2
avg # margin viol: 0
- non0 feature count: 703
- avg list sz: 90.9
- avg f count: 100.09
-(time 0.25 min, 1.5 s/S)
+ non0 feature count: 887
+ avg list sz: 91.3
+ avg f count: 126.85
+(time 0.33 min, 2 s/S)
Iteration #2 of 2.
. 10
WEIGHTS
- Glue = -622
- WordPenalty = +898.56
- LanguageModel = +8066.2
- LanguageModel_OOV = -2590
- PhraseModel_0 = -4335.8
- PhraseModel_1 = -5864.4
- PhraseModel_2 = -1729.8
- PhraseModel_3 = +2831.9
- PhraseModel_4 = -5384.8
- PhraseModel_5 = +1449
- PhraseModel_6 = +480
- PassThrough = -2578
+ Glue = -1025
+ WordPenalty = +1751.5
+ LanguageModel = +10059
+ LanguageModel_OOV = -4490
+ PhraseModel_0 = -2640.7
+ PhraseModel_1 = -3757.4
+ PhraseModel_2 = -1133.1
+ PhraseModel_3 = +1837.3
+ PhraseModel_4 = -3534.3
+ PhraseModel_5 = +2308
+ PhraseModel_6 = +1677
+ PassThrough = -6222
---
- 1best avg score: 0.37119 (+0.094226)
- 1best avg model score: -1.3174e+05 (-83822)
- avg # pairs: 584.1 (meaningless)
- avg # rank err: 584.1
+ 1best avg score: 0.30764 (+0.12891)
+ 1best avg model score: -2.5042e+05 (-3.3882e+05)
+ avg # pairs: 725.9 (meaningless)
+ avg # rank err: 725.9
avg # margin viol: 0
- non0 feature count: 1115
+ non0 feature count: 1499
avg list sz: 91.3
- avg f count: 90.755
-(time 0.3 min, 1.8 s/S)
+ avg f count: 114.34
+(time 0.32 min, 1.9 s/S)
Writing weights file to '-' ...
done
---
-Best iteration: 2 [SCORE 'stupid_bleu'=0.37119].
-This took 0.55 min.
+Best iteration: 2 [SCORE 'fixed_stupid_bleu'=0.30764].
+This took 0.65 min.
diff --git a/training/dtrain/kbestget.h b/training/dtrain/kbestget.h
index dd8882e1..85252db3 100644
--- a/training/dtrain/kbestget.h
+++ b/training/dtrain/kbestget.h
@@ -1,76 +1,12 @@
#ifndef _DTRAIN_KBESTGET_H_
#define _DTRAIN_KBESTGET_H_
-#include "kbest.h" // cdec
-#include "sentence_metadata.h"
-
-#include "verbose.h"
-#include "viterbi.h"
-#include "ff_register.h"
-#include "decoder.h"
-#include "weights.h"
-#include "logval.h"
-
-using namespace std;
+#include "kbest.h"
namespace dtrain
{
-typedef double score_t;
-
-struct ScoredHyp
-{
- vector<WordID> w;
- SparseVector<double> f;
- score_t model;
- score_t score;
- unsigned rank;
-};
-
-struct LocalScorer
-{
- unsigned N_;
- vector<score_t> w_;
-
- virtual score_t
- Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned rank, const unsigned src_len)=0;
-
- void Reset() {} // only for approx bleu
-
- inline void
- Init(unsigned N, vector<score_t> weights)
- {
- assert(N > 0);
- N_ = N;
- if (weights.empty()) for (unsigned i = 0; i < N_; i++) w_.push_back(1./N_);
- else w_ = weights;
- }
-
- inline score_t
- brevity_penalty(const unsigned hyp_len, const unsigned ref_len)
- {
- if (hyp_len > ref_len) return 1;
- return exp(1 - (score_t)ref_len/hyp_len);
- }
-};
-
-struct HypSampler : public DecoderObserver
-{
- LocalScorer* scorer_;
- vector<WordID>* ref_;
- unsigned f_count_, sz_;
- virtual vector<ScoredHyp>* GetSamples()=0;
- inline void SetScorer(LocalScorer* scorer) { scorer_ = scorer; }
- inline void SetRef(vector<WordID>& ref) { ref_ = &ref; }
- inline unsigned get_f_count() { return f_count_; }
- inline unsigned get_sz() { return sz_; }
-};
-////////////////////////////////////////////////////////////////////////////////
-
-
-
-
struct KBestGetter : public HypSampler
{
const unsigned k_;
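
KBestGetter's extraction logic is untouched by this patch; for context, a condensed sketch of how such an observer fills ScoredHyp entries from a hypergraph, assuming the KBest::KBestDerivations interface from cdec's kbest.h:

    void GetKBest(const Hypergraph& forest, vector<ScoredHyp>* s, unsigned k)
    {
      typedef KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> K;
      K kbest(forest, k);
      for (unsigned i = 0; i < k; ++i) {
        const K::Derivation* d = kbest.LazyKthBest(forest.nodes_.size() - 1, i);
        if (!d) break; // fewer than k derivations in the forest
        ScoredHyp h;
        h.w = d->yield;            // target-side yield
        h.f = d->feature_values;   // sparse feature vector
        h.model = log(d->score);   // model score in log space
        h.rank = i;
        s->push_back(h);
      }
    }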
diff --git a/training/dtrain/ksampler.h b/training/dtrain/ksampler.h
index bc2f56cd..29dab667 100644
--- a/training/dtrain/ksampler.h
+++ b/training/dtrain/ksampler.h
@@ -1,13 +1,12 @@
#ifndef _DTRAIN_KSAMPLER_H_
#define _DTRAIN_KSAMPLER_H_
-#include "hg_sampler.h" // cdec
-#include "kbestget.h"
-#include "score.h"
+#include "hg_sampler.h"
namespace dtrain
{
+
bool
cmp_hyp_by_model_d(ScoredHyp a, ScoredHyp b)
{
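
The hunk ends at the comparator's opening brace; its body (unchanged here) presumably just orders sampled hypotheses by descending model score:

    return a.model > b.model; // best model score first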
diff --git a/training/dtrain/parallelize.rb b/training/dtrain/parallelize.rb
index e661416e..285f3c9b 100755
--- a/training/dtrain/parallelize.rb
+++ b/training/dtrain/parallelize.rb
@@ -4,7 +4,7 @@ require 'trollop'
def usage
STDERR.write "Usage: "
- STDERR.write "ruby parallelize.rb -c <dtrain.ini> [-e <epochs=10>] [--randomize/-z] [--reshard/-y] -s <#shards|0> [-p <at once=9999>] -i <input> -r <refs> [--qsub/-q] [--dtrain_binary <path to dtrain binary>] [-l \"l2 select_k 100000\"]\n"
+ STDERR.write "ruby parallelize.rb -c <dtrain.ini> [-e <epochs=10>] [--randomize/-z] [--reshard/-y] -s <#shards|0> [-p <at once=9999>] -i <input> -r <refs> [--qsub/-q] [--dtrain_binary <path to dtrain binary>] [-l \"l2 select_k 100000\"] [--extra_qsub \"-l virtual_free=24G\"]\n"
exit 1
end
@@ -20,6 +20,7 @@ opts = Trollop::options do
opt :references, "references", :type => :string
opt :qsub, "use qsub", :type => :bool, :default => false
opt :dtrain_binary, "path to dtrain binary", :type => :string
+ opt :extra_qsub, "extra qsub args", :type => :string, :default => ""
end
usage if not opts[:config]&&opts[:shards]&&opts[:input]&&opts[:references]
@@ -119,11 +120,11 @@ end
qsub_str_start = qsub_str_end = ''
local_end = ''
if use_qsub
- qsub_str_start = "qsub -cwd -sync y -b y -j y -o work/out.#{shard}.#{epoch} -N dtrain.#{shard}.#{epoch} \""
+ qsub_str_start = "qsub #{opts[:extra_qsub]} -cwd -sync y -b y -j y -o work/out.#{shard}.#{epoch} -N dtrain.#{shard}.#{epoch} \""
qsub_str_end = "\""
local_end = ''
else
- local_end = "&>work/out.#{shard}.#{epoch}"
+ local_end = "2>work/out.#{shard}.#{epoch}"
end
pids << Kernel.fork {
`#{qsub_str_start}#{dtrain_bin} -c #{ini}\
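
A hypothetical invocation of the new --extra_qsub flag (shard count, paths, and the resource string are placeholders):

    ruby parallelize.rb -c dtrain.ini -s 2 -i train.src -r train.ref -q --extra_qsub "-l virtual_free=24G"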
diff --git a/training/dtrain/score.h b/training/dtrain/score.h
index bddaa071..53e970ba 100644
--- a/training/dtrain/score.h
+++ b/training/dtrain/score.h
@@ -1,9 +1,7 @@
#ifndef _DTRAIN_SCORE_H_
#define _DTRAIN_SCORE_H_
-#include "kbestget.h"
-
-using namespace std;
+#include "dtrain.h"
namespace dtrain
{
@@ -141,36 +139,43 @@ struct BleuScorer : public LocalScorer
{
score_t Bleu(NgramCounts& counts, const unsigned hyp_len, const unsigned ref_len);
score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+ void Reset() {}
};
struct StupidBleuScorer : public LocalScorer
{
score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+ void Reset() {}
};
struct FixedStupidBleuScorer : public LocalScorer
{
score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+ void Reset() {}
};
struct SmoothBleuScorer : public LocalScorer
{
score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+ void Reset() {}
};
struct SumBleuScorer : public LocalScorer
{
- score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+ score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+ void Reset() {}
};
struct SumExpBleuScorer : public LocalScorer
{
- score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+ score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+ void Reset() {}
};
struct SumWhateverBleuScorer : public LocalScorer
{
- score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+ score_t Score(vector<WordID>& hyp, vector<WordID>& ref, const unsigned /*rank*/, const unsigned /*src_len*/);
+  void Reset() {}
};
struct ApproxBleuScorer : public BleuScorer