author     redpony <redpony@ec762483-ff6d-05da-a07a-a48fb63a330f>  2010-07-02 19:23:08 +0000
committer  redpony <redpony@ec762483-ff6d-05da-a07a-a48fb63a330f>  2010-07-02 19:23:08 +0000
commit     36b0eac74f5d8f8674659826a72276b47d687bd6 (patch)
tree       af55f23790cb50810637af56e3de8fcdb6db7868
parent     0f0ffedff50f371128a1305e475dafdc19aaa26c (diff)
filter and score in a single file
git-svn-id: https://ws10smt.googlecode.com/svn/trunk@118 ec762483-ff6d-05da-a07a-a48fb63a330f
-rw-r--r--  decoder/sparse_vector.h              17
-rw-r--r--  extools/Makefile.am                  15
-rw-r--r--  extools/filter_grammar.cc           196
-rw-r--r--  extools/filter_score_grammar.cc     450
-rw-r--r--  extools/score_grammar.cc            371
-rwxr-xr-x  gi/pipeline/filter-for-test-set.pl   68
-rwxr-xr-x  gi/pipeline/local-gi-pipeline.pl      6
7 files changed, 470 insertions, 653 deletions
diff --git a/decoder/sparse_vector.h b/decoder/sparse_vector.h
index 66c9b10d..896e8c43 100644
--- a/decoder/sparse_vector.h
+++ b/decoder/sparse_vector.h
@@ -187,17 +187,26 @@ public:
return result /= x;
}
- std::ostream &operator<<(std::ostream &out) const {
+ std::ostream &operator<<(std::ostream& out) const {
+ Write(true, &out);
+ return out;
+ }
+
+ void Write(const bool with_semi, std::ostream* os) const {
bool first = true;
for (typename MapType::const_iterator
it = values_.begin(); it != values_.end(); ++it) {
// by definition feature id 0 is a dummy value
if (it->first == 0) continue;
- out << (first ? "" : ";")
- << FD::Convert(it->first) << '=' << it->second;
+ if (with_semi) {
+ (*os) << (first ? "" : ";")
+ << FD::Convert(it->first) << '=' << it->second;
+ } else {
+ (*os) << (first ? "" : " ")
+ << FD::Convert(it->first) << '=' << it->second;
+ }
first = false;
}
- return out;
}
bool operator<(const SparseVector<T> &other) const {
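The new Write method factors serialization out of operator<< so the scorer can emit features space-separated on grammar lines while debug printing keeps the ';' separator. A minimal standalone sketch of the two formats (mock types only: a std::map stands in for SparseVector and plain strings for FD::Convert lookups):

#include <iostream>
#include <map>
#include <string>

// Mock of SparseVector::Write: with_semi selects ';' (the operator<< path)
// or ' ' (the grammar-output path) between feature=value pairs.
void Write(const std::map<std::string, double>& values, bool with_semi,
           std::ostream* os) {
  bool first = true;
  for (const auto& kv : values) {
    (*os) << (first ? "" : (with_semi ? ";" : " "))
          << kv.first << '=' << kv.second;
    first = false;
  }
}

int main() {
  std::map<std::string, double> feats = {{"EGivenF", 1.5}, {"LexF2E", 0.37}};
  Write(feats, true, &std::cout);   // prints: EGivenF=1.5;LexF2E=0.37
  std::cout << '\n';
  Write(feats, false, &std::cout);  // prints: EGivenF=1.5 LexF2E=0.37
  std::cout << '\n';
}

The space-separated form is what filter_score_grammar.cc below writes after the final ||| of each scored rule.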
diff --git a/extools/Makefile.am b/extools/Makefile.am
index 5baefa21..bce6c404 100644
--- a/extools/Makefile.am
+++ b/extools/Makefile.am
@@ -2,11 +2,14 @@ bin_PROGRAMS = \
extractor \
mr_stripe_rule_reduce \
build_lexical_translation \
- score_grammar \
- filter_grammar
+ filter_score_grammar
noinst_PROGRAMS =
+filter_score_grammar_SOURCES = filter_score_grammar.cc extract.cc sentence_pair.cc
+filter_score_grammar_LDADD = $(top_srcdir)/decoder/libcdec.a -lz
+filter_score_grammar_LDFLAGS = -all-static
+
build_lexical_translation_SOURCES = build_lexical_translation.cc extract.cc sentence_pair.cc
build_lexical_translation_LDADD = $(top_srcdir)/decoder/libcdec.a -lz
build_lexical_translation_LDFLAGS = -all-static
@@ -19,13 +22,5 @@ extractor_SOURCES = sentence_pair.cc extract.cc extractor.cc
extractor_LDADD = $(top_srcdir)/decoder/libcdec.a -lz
extractor_LDFLAGS = -all-static
-filter_grammar_SOURCES = sentence_pair.cc extract.cc filter_grammar.cc
-filter_grammar_LDADD = $(top_srcdir)/decoder/libcdec.a -lz
-filter_grammar_LDFLAGS = -all-static
-
-score_grammar_SOURCES = sentence_pair.cc score_grammar.cc extract.cc
-score_grammar_LDADD = $(top_srcdir)/decoder/libcdec.a -lz
-score_grammar_LDFLAGS = -all-static
-
AM_CPPFLAGS = -W -Wall -Wno-sign-compare $(GTEST_CPPFLAGS) -I$(top_srcdir)/decoder
diff --git a/extools/filter_grammar.cc b/extools/filter_grammar.cc
deleted file mode 100644
index de052e49..00000000
--- a/extools/filter_grammar.cc
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Build suffix tree representation of a data set for grammar filtering
- * ./filter_grammar <test set> < unfiltered.grammar > filter.grammar
- *
- */
-#include <iostream>
-#include <string>
-#include <map>
-#include <vector>
-#include <utility>
-#include <cstdlib>
-#include <fstream>
-#include <tr1/unordered_map>
-
-#include "filelib.h"
-#include "sentence_pair.h"
-#include "suffix_tree.h"
-#include "extract.h"
-#include "fdict.h"
-#include "tdict.h"
-
-#include <boost/functional/hash.hpp>
-#include <boost/program_options.hpp>
-#include <boost/program_options/variables_map.hpp>
-
-
-using namespace std;
-using namespace std::tr1;
-
-static const size_t MAX_LINE_LENGTH = 64000000;
-
-typedef unordered_map<vector<WordID>, RuleStatistics, boost::hash<vector<WordID> > > ID2RuleStatistics;
-
-
-namespace {
- inline bool IsWhitespace(char c) { return c == ' ' || c == '\t'; }
- inline bool IsBracket(char c){return c == '[' || c == ']';}
- inline void SkipWhitespace(const char* buf, int* ptr) {
- while (buf[*ptr] && IsWhitespace(buf[*ptr])) { ++(*ptr); }
- }
-}
-
-
-
-int ReadPhraseUntilDividerOrEnd(const char* buf, const int sstart, const int end, vector<WordID>* p) {
- static const WordID kDIV = TD::Convert("|||");
-
- int ptr = sstart;
- while(ptr < end) {
- while(ptr < end && IsWhitespace(buf[ptr])) { ++ptr; }
- int start = ptr;
- while(ptr < end && !IsWhitespace(buf[ptr])) { ++ptr; }
- if (ptr == start) {cerr << "Warning! empty token.\n"; return ptr; }
- //look in the buffer and see if it's a non-terminal marker (anything with [...] or |||) before integerizing it to a WordID
-
- const WordID w = TD::Convert(string(buf, start, ptr - start));
-
- if((IsBracket(buf[start]) and IsBracket(buf[ptr-1])) or( w == kDIV))
- p->push_back(-1);
- else {
- if (w == kDIV) return ptr;
- p->push_back(w);
- }
- }
- return ptr;
-}
-
-
-
-void ParseLine(const char* buf, vector<WordID>* cur_key, ID2RuleStatistics* counts) {
- static const WordID kDIV = TD::Convert("|||");
- counts->clear();
- int ptr = 0;
- while(buf[ptr] != 0 && buf[ptr] != '\t') { ++ptr; }
- if (buf[ptr] != '\t') {
- cerr << "Missing tab separator between key and value!\n INPUT=" << buf << endl;
- exit(1);
- }
- cur_key->clear();
- // key is: "[X] ||| word word word"
- int tmpp = ReadPhraseUntilDividerOrEnd(buf, 0, ptr, cur_key);
- cur_key->push_back(kDIV);
- ReadPhraseUntilDividerOrEnd(buf, tmpp, ptr, cur_key);
- ++ptr;
- int start = ptr;
- int end = ptr;
- int state = 0; // 0=reading label, 1=reading count
- vector<WordID> name;
- while(buf[ptr] != 0) {
- while(buf[ptr] != 0 && buf[ptr] != '|') { ++ptr; }
- if (buf[ptr] == '|') {
- ++ptr;
- if (buf[ptr] == '|') {
- ++ptr;
- if (buf[ptr] == '|') {
- ++ptr;
- end = ptr - 3;
- while (end > start && IsWhitespace(buf[end-1])) { --end; }
- if (start == end) {
- cerr << "Got empty token!\n LINE=" << buf << endl;
- exit(1);
- }
- switch (state) {
- case 0: ++state; name.clear(); ReadPhraseUntilDividerOrEnd(buf, start, end, &name); break;
- case 1: --state; (*counts)[name].ParseRuleStatistics(buf, start, end); break;
- default: cerr << "Can't happen\n"; abort();
- }
- SkipWhitespace(buf, &ptr);
- start = ptr;
- }
- }
- }
- }
- end=ptr;
- while (end > start && IsWhitespace(buf[end-1])) { --end; }
- if (end > start) {
- switch (state) {
- case 0: ++state; name.clear(); ReadPhraseUntilDividerOrEnd(buf, start, end, &name); break;
- case 1: --state; (*counts)[name].ParseRuleStatistics(buf, start, end); break;
- default: cerr << "Can't happen\n"; abort();
- }
- }
-}
-
-
-
-
-
-
-
-int main(int argc, char* argv[]){
- if (argc != 2) {
- cerr << "Usage: " << argv[0] << " testset.txt < unfiltered.grammar\n";
- return 1;
- }
-
- assert(FileExists(argv[1]));
- ReadFile rfts(argv[1]);
- istream& testSet = *rfts.stream();
- ofstream filter_grammar_;
- bool DEBUG = false;
-
- AnnotatedParallelSentence sent;
- char* buf = new char[MAX_LINE_LENGTH];
- cerr << "Build suffix tree from test set in " << argv[1] << endl;
- //root of the suffix tree
- Node<int> root;
- int line=0;
-
- /* process the data set to build suffix tree
- */
- while(!testSet.eof()) {
- ++line;
- testSet.getline(buf, MAX_LINE_LENGTH);
- if (buf[0] == 0) continue;
-
- //hack to read in the test set using the AnnotatedParallelSentence methods
- strcat(buf," ||| fake ||| 0-0");
- sent.ParseInputLine(buf);
-
- if (DEBUG)cerr << line << "||| " << buf << " -- " << sent.f_len << endl;
-
- //add each successive suffix to the tree
- for(int i =0;i<sent.f_len;i++)
- root.InsertPath(sent.f, i, sent.f_len - 1);
- if(DEBUG)cerr<<endl;
-
- }
-
- cerr << "Filtering grammar..." << endl;
- //process the unfiltered, unscored grammar
-
- ID2RuleStatistics cur_counts;
- vector<WordID> cur_key;
- line = 0;
-
- while(cin) {
- ++line;
- cin.getline(buf, MAX_LINE_LENGTH);
- if (buf[0] == 0) continue;
- ParseLine(buf, &cur_key, &cur_counts);
- const Node<int>* curnode = &root;
- for(int i=0;i<cur_key.size() - 1; i++) {
- if (DEBUG) cerr << line << " " << cur_key[i] << " ::: ";
- if (cur_key[i] == -1) { // non-terminal
- curnode = &root;
- } else if (curnode) {
- curnode = curnode->Extend(cur_key[i]);
- if (!curnode) break;
- }
- }
- if(curnode) cout << buf << endl;
- }
-
- return 0;
-}
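filter_grammar.cc is deleted, but its suffix-tree filtering survives as DumbSuffixTreeFilter in the new filter_score_grammar.cc below. A self-contained sketch of the idea, with a pooled trie standing in for cdec's Node<int> and strings for WordIDs (assumed shapes, not the real API): insert every suffix of every test sentence, then keep a rule only if each run of terminals on its source side traces a path in the trie, restarting at non-terminals.

#include <map>
#include <string>
#include <vector>

// Pooled trie standing in for cdec's Node<int>: nodes live in a vector and
// children are indices into it, so no recursive member type is needed.
struct Trie {
  std::vector<std::map<std::string, int>> nodes;
  Trie() : nodes(1) {}  // node 0 is the root
  void InsertSuffix(const std::vector<std::string>& w, size_t start) {
    int cur = 0;
    for (size_t i = start; i < w.size(); ++i) {
      auto it = nodes[cur].find(w[i]);
      if (it == nodes[cur].end()) {
        nodes.emplace_back();  // may reallocate, so re-index below
        it = nodes[cur].insert({w[i], (int)nodes.size() - 1}).first;
      }
      cur = it->second;
    }
  }
  int Extend(int node, const std::string& w) const {
    auto it = nodes[node].find(w);
    return it == nodes[node].end() ? -1 : it->second;
  }
};

// Keep a rule iff every run of terminals on its source side is a substring
// of some test sentence; matching restarts at non-terminal tokens ("[X,1]").
bool Matches(const Trie& t, const std::vector<std::string>& src) {
  int cur = 0;
  for (const auto& tok : src) {
    if (tok.size() >= 2 && tok.front() == '[' && tok.back() == ']') {
      cur = 0;  // non-terminal: reset to the root
    } else {
      cur = t.Extend(cur, tok);
      if (cur < 0) return false;
    }
  }
  return true;
}

int main() {
  Trie t;
  std::vector<std::string> test = {"the", "big", "dog"};
  for (size_t i = 0; i < test.size(); ++i) t.InsertSuffix(test, i);
  std::vector<std::string> keep = {"big", "dog", "[X,1]", "the"};
  std::vector<std::string> drop = {"big", "cat"};
  return (Matches(t, keep) && !Matches(t, drop)) ? 0 : 1;
}

Because matching restarts at every non-terminal gap, terminal runs on either side of a gap may match different test sentences, so the filter over-generates slightly; presumably the "Dumb" in the production class's name.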
diff --git a/extools/filter_score_grammar.cc b/extools/filter_score_grammar.cc
new file mode 100644
index 00000000..e1fd714b
--- /dev/null
+++ b/extools/filter_score_grammar.cc
@@ -0,0 +1,450 @@
+/*
+ * Filter & score a grammar in striped format
+ */
+#include <iostream>
+#include <string>
+#include <map>
+#include <vector>
+#include <utility>
+#include <cstdlib>
+#include <fstream>
+#include <tr1/unordered_map>
+
+#include "suffix_tree.h"
+#include "sparse_vector.h"
+#include "sentence_pair.h"
+#include "extract.h"
+#include "fdict.h"
+#include "tdict.h"
+#include "lex_trans_tbl.h"
+#include "filelib.h"
+
+#include <boost/shared_ptr.hpp>
+#include <boost/functional/hash.hpp>
+#include <boost/program_options.hpp>
+#include <boost/program_options/variables_map.hpp>
+
+using namespace std;
+using namespace std::tr1;
+namespace po = boost::program_options;
+
+static const size_t MAX_LINE_LENGTH = 64000000;
+
+typedef unordered_map<vector<WordID>, RuleStatistics, boost::hash<vector<WordID> > > ID2RuleStatistics;
+
+void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
+ po::options_description opts("Configuration options");
+ opts.add_options()
+ ("test_set,t", po::value<string>(), "Filter for this test set (not specified = no filtering)")
+ ("top_e_given_f,n", po::value<size_t>()->default_value(30), "Keep top N rules, according to p(e|f). 0 for all")
+ ("aligned_corpus,c", po::value<string>(), "Aligned corpus (single line format)")
+ ("help,h", "Print this help message and exit");
+ po::options_description clo("Command line options");
+ po::options_description dcmdline_options;
+ dcmdline_options.add(opts);
+
+ po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
+ po::notify(*conf);
+
+ if (conf->count("help") || conf->count("aligned_corpus")==0) {
+ cerr << "\nUsage: filter_score_grammar -t TEST-SET.fr -c ALIGNED_CORPUS.fr-en-al [-options] < grammar\n";
+ cerr << dcmdline_options << endl;
+ exit(1);
+ }
+}
+namespace {
+ inline bool IsWhitespace(char c) { return c == ' ' || c == '\t'; }
+ inline bool IsBracket(char c){return c == '[' || c == ']';}
+ inline void SkipWhitespace(const char* buf, int* ptr) {
+ while (buf[*ptr] && IsWhitespace(buf[*ptr])) { ++(*ptr); }
+ }
+}
+
+int ReadPhraseUntilDividerOrEnd(const char* buf, const int sstart, const int end, vector<WordID>* p) {
+ static const WordID kDIV = TD::Convert("|||");
+ int ptr = sstart;
+ while(ptr < end) {
+ while(ptr < end && IsWhitespace(buf[ptr])) { ++ptr; }
+ int start = ptr;
+ while(ptr < end && !IsWhitespace(buf[ptr])) { ++ptr; }
+ if (ptr == start) {cerr << "Warning! empty token.\n"; return ptr; }
+ const WordID w = TD::Convert(string(buf, start, ptr - start));
+
+ if((IsBracket(buf[start]) and IsBracket(buf[ptr-1])) or( w == kDIV))
+ p->push_back(1 * w);
+ else {
+ if (w == kDIV) return ptr;
+ p->push_back(w);
+ }
+ }
+ return ptr;
+}
+
+
+void ParseLine(const char* buf, vector<WordID>* cur_key, ID2RuleStatistics* counts) {
+ static const WordID kDIV = TD::Convert("|||");
+ counts->clear();
+ int ptr = 0;
+ while(buf[ptr] != 0 && buf[ptr] != '\t') { ++ptr; }
+ if (buf[ptr] != '\t') {
+ cerr << "Missing tab separator between key and value!\n INPUT=" << buf << endl;
+ exit(1);
+ }
+ cur_key->clear();
+ // key is: "[X] ||| word word word"
+ int tmpp = ReadPhraseUntilDividerOrEnd(buf, 0, ptr, cur_key);
+ cur_key->push_back(kDIV);
+ ReadPhraseUntilDividerOrEnd(buf, tmpp, ptr, cur_key);
+ ++ptr;
+ int start = ptr;
+ int end = ptr;
+ int state = 0; // 0=reading label, 1=reading count
+ vector<WordID> name;
+ while(buf[ptr] != 0) {
+ while(buf[ptr] != 0 && buf[ptr] != '|') { ++ptr; }
+ if (buf[ptr] == '|') {
+ ++ptr;
+ if (buf[ptr] == '|') {
+ ++ptr;
+ if (buf[ptr] == '|') {
+ ++ptr;
+ end = ptr - 3;
+ while (end > start && IsWhitespace(buf[end-1])) { --end; }
+ if (start == end) {
+ cerr << "Got empty token!\n LINE=" << buf << endl;
+ exit(1);
+ }
+ switch (state) {
+ case 0: ++state; name.clear(); ReadPhraseUntilDividerOrEnd(buf, start, end, &name); break;
+ case 1: --state; (*counts)[name].ParseRuleStatistics(buf, start, end); break;
+ default: cerr << "Can't happen\n"; abort();
+ }
+ SkipWhitespace(buf, &ptr);
+ start = ptr;
+ }
+ }
+ }
+ }
+ end=ptr;
+ while (end > start && IsWhitespace(buf[end-1])) { --end; }
+ if (end > start) {
+ switch (state) {
+ case 0: ++state; name.clear(); ReadPhraseUntilDividerOrEnd(buf, start, end, &name); break;
+ case 1: --state; (*counts)[name].ParseRuleStatistics(buf, start, end); break;
+ default: cerr << "Can't happen\n"; abort();
+ }
+ }
+}
+
+
+void LexTranslationTable::createTTable(const char* buf){
+ AnnotatedParallelSentence sent;
+ sent.ParseInputLine(buf);
+
+ //iterate over the alignment to compute aligned words
+
+ for(int i =0;i<sent.aligned.width();i++)
+ {
+ for (int j=0;j<sent.aligned.height();j++)
+ {
+ if (DEBUG) cerr << sent.aligned(i,j) << " ";
+ if( sent.aligned(i,j))
+ {
+ if (DEBUG) cerr << TD::Convert(sent.f[i]) << " aligned to " << TD::Convert(sent.e[j]);
+ ++word_translation[pair<WordID,WordID> (sent.f[i], sent.e[j])];
+ ++total_foreign[sent.f[i]];
+ ++total_english[sent.e[j]];
+ }
+ }
+ if (DEBUG) cerr << endl;
+ }
+ if (DEBUG) cerr << endl;
+
+ const WordID NULL_ = TD::Convert("NULL");
+ //handle unaligned words - align them to null
+ for (int j =0; j < sent.e_len; j++) {
+ if (sent.e_aligned[j]) continue;
+ ++word_translation[pair<WordID,WordID> (NULL_, sent.e[j])];
+ ++total_foreign[NULL_];
+ ++total_english[sent.e[j]];
+ }
+
+ for (int i =0; i < sent.f_len; i++) {
+ if (sent.f_aligned[i]) continue;
+ ++word_translation[pair<WordID,WordID> (sent.f[i], NULL_)];
+ ++total_english[NULL_];
+ ++total_foreign[sent.f[i]];
+ }
+}
+
+
+inline float safenlog(float v) {
+ if (v == 1.0f) return 0.0f;
+ float res = -log(v);
+ if (res > 100.0f) res = 100.0f;
+ return res;
+}
+
+struct SourceFilter {
+ // return true to keep the rule, otherwise false
+ virtual bool Matches(const vector<WordID>& key) const = 0;
+ virtual ~SourceFilter() {}
+};
+
+struct DumbSuffixTreeFilter : SourceFilter {
+ DumbSuffixTreeFilter(const string& corpus) :
+ kDIV(TD::Convert("|||")) {
+ cerr << "Build suffix tree from test set in " << corpus << endl;
+ assert(FileExists(corpus));
+ ReadFile rfts(corpus);
+ istream& testSet = *rfts.stream();
+ char* buf = new char[MAX_LINE_LENGTH];
+ AnnotatedParallelSentence sent;
+
+ /* process the data set to build suffix tree
+ */
+ while(!testSet.eof()) {
+ testSet.getline(buf, MAX_LINE_LENGTH);
+ if (buf[0] == 0) continue;
+
+ //hack to read in the test set using AnnotatedParallelSentence
+ strcat(buf," ||| fake ||| 0-0");
+ sent.ParseInputLine(buf);
+
+ //add each successive suffix to the tree
+ for(int i=0; i<sent.f_len; i++)
+ root.InsertPath(sent.f, i, sent.f_len - 1);
+ }
+ delete[] buf;
+ }
+ virtual bool Matches(const vector<WordID>& key) const {
+ const Node<int>* curnode = &root;
+ const int ks = key.size() - 1;
+ for(int i=0; i < ks; i++) {
+ const string& word = TD::Convert(key[i]);
+ if (key[i] == kDIV || (word[0] == '[' && word[word.size() - 1] == ']')) { // non-terminal
+ curnode = &root;
+ } else if (curnode) {
+ curnode = curnode->Extend(key[i]);
+ if (!curnode) return false;
+ }
+ }
+ return true;
+ }
+ const WordID kDIV;
+ Node<int> root;
+};
+
+struct FeatureExtractor {
+ FeatureExtractor(const std::string& name) : extractor_name(name) {}
+ virtual void ExtractFeatures(const vector<WordID>& lhs_src,
+ const vector<WordID>& trg,
+ const RuleStatistics& info,
+ SparseVector<float>* result) const = 0;
+ virtual ~FeatureExtractor() {}
+ const string extractor_name;
+};
+
+struct EGivenFExtractor : public FeatureExtractor {
+ EGivenFExtractor() :
+ FeatureExtractor("EGivenF"),
+ fid_(FD::Convert("EGivenF")), kCF(FD::Convert("CF")), kCFE(FD::Convert("CFE")) {}
+ virtual void ExtractFeatures(const vector<WordID>& lhs_src,
+ const vector<WordID>& trg,
+ const RuleStatistics& info,
+ SparseVector<float>* result) const {
+ (void) lhs_src; (void) trg;
+ assert(info.counts.value(kCF) > 0.0f);
+ result->set_value(fid_, safenlog(info.counts.value(kCFE) / info.counts.value(kCF)));
+ }
+ const int fid_, kCF, kCFE;
+};
+
+struct FGivenEExtractor : public FeatureExtractor {
+ FGivenEExtractor() :
+ FeatureExtractor("FGivenE"),
+ fid_(FD::Convert("FGivenE")), kCE(FD::Convert("CE")), kCFE(FD::Convert("CFE")) {}
+ virtual void ExtractFeatures(const vector<WordID>& lhs_src,
+ const vector<WordID>& trg,
+ const RuleStatistics& info,
+ SparseVector<float>* result) const {
+ (void) lhs_src; (void) trg;
+ assert(info.counts.value(kCE) > 0.0f);
+ result->set_value(fid_, safenlog(info.counts.value(kCFE) / info.counts.value(kCE)));
+ }
+ const int fid_, kCE, kCFE;
+};
+
+// this extracts the lexical translation prob features
+// in BOTH directions.
+struct LexProbExtractor : public FeatureExtractor {
+ LexProbExtractor(const std::string& corpus) :
+ FeatureExtractor("LexProb"), e2f_(FD::Convert("LexE2F")), f2e_(FD::Convert("LexF2E")) {
+ ReadFile rf(corpus);
+ //create lexical translation table
+ cerr << "Computing lexical translation probabilities from " << corpus << "..." << endl;
+ char* buf = new char[MAX_LINE_LENGTH];
+ istream& alignment = *rf.stream();
+ while(alignment) {
+ alignment.getline(buf, MAX_LINE_LENGTH);
+ if (buf[0] == 0) continue;
+ table.createTTable(buf);
+ }
+ delete[] buf;
+#if 0
+ bool PRINT_TABLE=false;
+ if (PRINT_TABLE) {
+ ofstream trans_table;
+ trans_table.open("lex_trans_table.out");
+ for(map < pair<WordID,WordID>,int >::iterator it = table.word_translation.begin(); it != table.word_translation.end(); ++it) {
+ trans_table << TD::Convert(it->first.first) << "|||" << TD::Convert(it->first.second) << "==" << it->second << "//" << table.total_foreign[it->first.first] << "//" << table.total_english[it->first.second] << endl;
+ }
+ trans_table.close();
+ }
+#endif
+ }
+
+ virtual void ExtractFeatures(const vector<WordID>& lhs_src,
+ const vector<WordID>& trg,
+ const RuleStatistics& info,
+ SparseVector<float>* result) const {
+ map <WordID, pair<int, float> > foreign_aligned;
+ map <WordID, pair<int, float> > english_aligned;
+
+ //Loop over all the alignment points to compute lexical translation probability
+ const vector< pair<short,short> >& al = info.aligns;
+ vector< pair<short,short> >::const_iterator ita;
+ for (ita = al.begin(); ita != al.end(); ++ita) {
+ if (DEBUG) {
+ cerr << "\nA:" << ita->first << "," << ita->second << "::";
+ cerr << TD::Convert(lhs_src[ita->first + 2]) << "-" << TD::Convert(trg[ita->second]);
+ }
+
+ //Lookup this alignment probability in the table
+ int temp = table.word_translation[pair<WordID,WordID> (lhs_src[ita->first+2],trg[ita->second])];
+ float f2e=0, e2f=0;
+ if ( table.total_foreign[lhs_src[ita->first+2]] != 0)
+ f2e = (float) temp / table.total_foreign[lhs_src[ita->first+2]];
+ if ( table.total_english[trg[ita->second]] !=0 )
+ e2f = (float) temp / table.total_english[trg[ita->second]];
+ if (DEBUG) printf (" %d %E %E\n", temp, f2e, e2f);
+
+ //local counts to keep track of which things haven't been aligned, to later compute their null alignment
+ if (foreign_aligned.count(lhs_src[ita->first+2])) {
+ foreign_aligned[ lhs_src[ita->first+2] ].first++;
+ foreign_aligned[ lhs_src[ita->first+2] ].second += e2f;
+ } else {
+ foreign_aligned[ lhs_src[ita->first+2] ] = pair<int,float> (1,e2f);
+ }
+
+ if (english_aligned.count( trg[ ita->second] )) {
+ english_aligned[ trg[ ita->second] ].first++;
+ english_aligned[ trg[ ita->second] ].second += f2e;
+ } else {
+ english_aligned[ trg[ ita->second] ] = pair<int,float> (1,f2e);
+ }
+ }
+
+ float final_lex_f2e=1, final_lex_e2f=1;
+ static const WordID NULL_ = TD::Convert("NULL");
+
+ //compute lexical weight P(F|E) and include unaligned foreign words
+ for(int i=0;i<lhs_src.size(); i++) {
+ if (!table.total_foreign.count(lhs_src[i])) continue; //if we dont have it in the translation table, we won't know its lexical weight
+
+ if (foreign_aligned.count(lhs_src[i]))
+ {
+ pair<int, float> temp_lex_prob = foreign_aligned[lhs_src[i]];
+ final_lex_e2f *= temp_lex_prob.second / temp_lex_prob.first;
+ }
+ else //dealing with null alignment
+ {
+ int temp_count = table.word_translation[pair<WordID,WordID> (lhs_src[i],NULL_)];
+ float temp_e2f = (float) temp_count / table.total_english[NULL_];
+ final_lex_e2f *= temp_e2f;
+ }
+
+ }
+
+ //compute P(E|F) unaligned english words
+ for(int j=0; j< trg.size(); j++) {
+ if (!table.total_english.count(trg[j])) continue;
+
+ if (english_aligned.count(trg[j]))
+ {
+ pair<int, float> temp_lex_prob = english_aligned[trg[j]];
+ final_lex_f2e *= temp_lex_prob.second / temp_lex_prob.first;
+ }
+ else //dealing with null
+ {
+ int temp_count = table.word_translation[pair<WordID,WordID> (NULL_,trg[j])];
+ float temp_f2e = (float) temp_count / table.total_foreign[NULL_];
+ final_lex_f2e *= temp_f2e;
+ }
+ }
+ result->set_value(e2f_, safenlog(final_lex_e2f));
+ result->set_value(f2e_, safenlog(final_lex_f2e));
+ }
+ const int e2f_, f2e_;
+ mutable LexTranslationTable table;
+};
+
+int main(int argc, char** argv){
+ po::variables_map conf;
+ InitCommandLine(argc, argv, &conf);
+ const int max_options = conf["top_e_given_f"].as<size_t>();
+ ifstream alignment (conf["aligned_corpus"].as<string>().c_str());
+ istream& unscored_grammar = cin;
+ ostream& scored_grammar = cout;
+
+ boost::shared_ptr<SourceFilter> filter;
+ if (conf.count("test_set"))
+ filter.reset(new DumbSuffixTreeFilter(conf["test_set"].as<string>()));
+
+ // TODO make this list configurable
+ vector<boost::shared_ptr<FeatureExtractor> > extractors;
+ extractors.push_back(boost::shared_ptr<FeatureExtractor>(new EGivenFExtractor));
+ extractors.push_back(boost::shared_ptr<FeatureExtractor>(new FGivenEExtractor));
+ extractors.push_back(boost::shared_ptr<FeatureExtractor>(new LexProbExtractor(conf["aligned_corpus"].as<string>())));
+
+ //score unscored grammar
+ cerr <<"Scoring grammar..." << endl;
+ char* buf = new char[MAX_LINE_LENGTH];
+
+ ID2RuleStatistics acc, cur_counts;
+ vector<WordID> key, cur_key,temp_key;
+ int line = 0;
+
+ const int kEGivenF = FD::Convert("EGivenF");
+ multimap<float, string> options;
+ while(!unscored_grammar.eof())
+ {
+ ++line;
+ options.clear();
+ unscored_grammar.getline(buf, MAX_LINE_LENGTH);
+ if (buf[0] == 0) continue;
+ ParseLine(buf, &cur_key, &cur_counts);
+ if (!filter || filter->Matches(cur_key)) {
+ //loop over all the Target side phrases that this source aligns to
+ for (ID2RuleStatistics::const_iterator it = cur_counts.begin(); it != cur_counts.end(); ++it) {
+
+ SparseVector<float> feats;
+ for (int i = 0; i < extractors.size(); ++i)
+ extractors[i]->ExtractFeatures(cur_key, it->first, it->second, &feats);
+
+ ostringstream os;
+ os << TD::GetString(cur_key)
+ << ' ' << TD::GetString(it->first) << " ||| ";
+ feats.Write(false, &os);
+ options.insert(make_pair(feats.value(kEGivenF), os.str()));
+ }
+ int ocount = 0;
+ for (multimap<float,string>::iterator it = options.begin(); it != options.end(); ++it) {
+ scored_grammar << it->second << endl;
+ ++ocount;
+ if (ocount == max_options) break;
+ }
+ }
+ }
+}
+
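The refactor turns scoring into pluggable FeatureExtractor passes and ranks each source side's options on the EGivenF feature. Since EGivenF is safenlog(p(e|f)), a clamped -log, ascending multimap order visits the most probable target sides first, and emission stops after top_e_given_f entries. A tiny sketch of that truncation with hard-coded rule strings (illustrative values, not real counts):

#include <iostream>
#include <map>
#include <string>

// Sketch of the top-N truncation in the new main(): options is keyed on the
// EGivenF feature, a clamped -log p(e|f), so ascending multimap order visits
// the most probable target sides first. Values below are illustrative only.
int main() {
  const int max_options = 2;  // stands in for --top_e_given_f
  std::multimap<float, std::string> options = {
    {0.2f, "[X] ||| chien ||| dog ||| EGivenF=0.2"},
    {1.6f, "[X] ||| chien ||| hound ||| EGivenF=1.6"},
    {4.1f, "[X] ||| chien ||| cat ||| EGivenF=4.1"},
  };
  int ocount = 0;
  for (const auto& kv : options) {  // emits "dog" and "hound", drops "cat"
    std::cout << kv.second << '\n';
    if (++ocount == max_options) break;
  }
}

With --top_e_given_f 0 the equality never fires (ocount counts up from 1), so every option is emitted, matching the "0 for all" in the help text.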
diff --git a/extools/score_grammar.cc b/extools/score_grammar.cc
deleted file mode 100644
index 7cdcdb64..00000000
--- a/extools/score_grammar.cc
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Score a grammar in striped format
- * ./score_grammar <alignment> < filtered.grammar > scored.grammar
- */
-#include <iostream>
-#include <string>
-#include <map>
-#include <vector>
-#include <utility>
-#include <cstdlib>
-#include <fstream>
-#include <tr1/unordered_map>
-
-#include "sentence_pair.h"
-#include "extract.h"
-#include "fdict.h"
-#include "tdict.h"
-#include "lex_trans_tbl.h"
-#include "filelib.h"
-
-#include <boost/functional/hash.hpp>
-#include <boost/program_options.hpp>
-#include <boost/program_options/variables_map.hpp>
-
-using namespace std;
-using namespace std::tr1;
-namespace po = boost::program_options;
-
-static const size_t MAX_LINE_LENGTH = 64000000;
-
-typedef unordered_map<vector<WordID>, RuleStatistics, boost::hash<vector<WordID> > > ID2RuleStatistics;
-
-void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
- po::options_description opts("Configuration options");
- opts.add_options()
- ("top_e_given_f,n", po::value<size_t>()->default_value(30), "Keep top N rules, according to p(e|f). 0 for all")
- ("aligned_corpus,c", po::value<string>(), "Aligned corpus (single line format)")
- ("help,h", "Print this help message and exit");
- po::options_description clo("Command line options");
- po::options_description dcmdline_options;
- dcmdline_options.add(opts);
-
- po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
- po::notify(*conf);
-
- if (conf->count("help") || conf->count("aligned_corpus")==0) {
- cerr << "\nUsage: score_grammar -c ALIGNED_CORPUS.fr-en-al [-options] < grammar\n";
- cerr << dcmdline_options << endl;
- exit(1);
- }
-}
-namespace {
- inline bool IsWhitespace(char c) { return c == ' ' || c == '\t'; }
- inline bool IsBracket(char c){return c == '[' || c == ']';}
- inline void SkipWhitespace(const char* buf, int* ptr) {
- while (buf[*ptr] && IsWhitespace(buf[*ptr])) { ++(*ptr); }
- }
-}
-
-int ReadPhraseUntilDividerOrEnd(const char* buf, const int sstart, const int end, vector<WordID>* p) {
- static const WordID kDIV = TD::Convert("|||");
- int ptr = sstart;
- while(ptr < end) {
- while(ptr < end && IsWhitespace(buf[ptr])) { ++ptr; }
- int start = ptr;
- while(ptr < end && !IsWhitespace(buf[ptr])) { ++ptr; }
- if (ptr == start) {cerr << "Warning! empty token.\n"; return ptr; }
- const WordID w = TD::Convert(string(buf, start, ptr - start));
-
- if((IsBracket(buf[start]) and IsBracket(buf[ptr-1])) or( w == kDIV))
- p->push_back(1 * w);
- else {
- if (w == kDIV) return ptr;
- p->push_back(w);
- }
- }
- return ptr;
-}
-
-
-void ParseLine(const char* buf, vector<WordID>* cur_key, ID2RuleStatistics* counts) {
- static const WordID kDIV = TD::Convert("|||");
- counts->clear();
- int ptr = 0;
- while(buf[ptr] != 0 && buf[ptr] != '\t') { ++ptr; }
- if (buf[ptr] != '\t') {
- cerr << "Missing tab separator between key and value!\n INPUT=" << buf << endl;
- exit(1);
- }
- cur_key->clear();
- // key is: "[X] ||| word word word"
- int tmpp = ReadPhraseUntilDividerOrEnd(buf, 0, ptr, cur_key);
- cur_key->push_back(kDIV);
- ReadPhraseUntilDividerOrEnd(buf, tmpp, ptr, cur_key);
- ++ptr;
- int start = ptr;
- int end = ptr;
- int state = 0; // 0=reading label, 1=reading count
- vector<WordID> name;
- while(buf[ptr] != 0) {
- while(buf[ptr] != 0 && buf[ptr] != '|') { ++ptr; }
- if (buf[ptr] == '|') {
- ++ptr;
- if (buf[ptr] == '|') {
- ++ptr;
- if (buf[ptr] == '|') {
- ++ptr;
- end = ptr - 3;
- while (end > start && IsWhitespace(buf[end-1])) { --end; }
- if (start == end) {
- cerr << "Got empty token!\n LINE=" << buf << endl;
- exit(1);
- }
- switch (state) {
- case 0: ++state; name.clear(); ReadPhraseUntilDividerOrEnd(buf, start, end, &name); break;
- case 1: --state; (*counts)[name].ParseRuleStatistics(buf, start, end); break;
- default: cerr << "Can't happen\n"; abort();
- }
- SkipWhitespace(buf, &ptr);
- start = ptr;
- }
- }
- }
- }
- end=ptr;
- while (end > start && IsWhitespace(buf[end-1])) { --end; }
- if (end > start) {
- switch (state) {
- case 0: ++state; name.clear(); ReadPhraseUntilDividerOrEnd(buf, start, end, &name); break;
- case 1: --state; (*counts)[name].ParseRuleStatistics(buf, start, end); break;
- default: cerr << "Can't happen\n"; abort();
- }
- }
-}
-
-
-
-void LexTranslationTable::createTTable(const char* buf){
-
- bool DEBUG = false;
-
- AnnotatedParallelSentence sent;
-
- sent.ParseInputLine(buf);
-
- //iterate over the alignment to compute aligned words
-
- for(int i =0;i<sent.aligned.width();i++)
- {
- for (int j=0;j<sent.aligned.height();j++)
- {
- if (DEBUG) cerr << sent.aligned(i,j) << " ";
- if( sent.aligned(i,j))
- {
- if (DEBUG) cerr << TD::Convert(sent.f[i]) << " aligned to " << TD::Convert(sent.e[j]);
- ++word_translation[pair<WordID,WordID> (sent.f[i], sent.e[j])];
- ++total_foreign[sent.f[i]];
- ++total_english[sent.e[j]];
- }
- }
- if (DEBUG) cerr << endl;
- }
- if (DEBUG) cerr << endl;
-
- static const WordID NULL_ = TD::Convert("NULL");
- //handle unaligned words - align them to null
- for (int j =0; j < sent.e_len; j++)
- {
- if (sent.e_aligned[j]) continue;
- ++word_translation[pair<WordID,WordID> (NULL_, sent.e[j])];
- ++total_foreign[NULL_];
- ++total_english[sent.e[j]];
- }
-
- for (int i =0; i < sent.f_len; i++)
- {
- if (sent.f_aligned[i]) continue;
- ++word_translation[pair<WordID,WordID> (sent.f[i], NULL_)];
- ++total_english[NULL_];
- ++total_foreign[sent.f[i]];
- }
-
-}
-
-
-inline float safenlog(float v) {
- if (v == 1.0f) return 0.0f;
- float res = -log(v);
- if (res > 100.0f) res = 100.0f;
- return res;
-}
-
-int main(int argc, char** argv){
- po::variables_map conf;
- InitCommandLine(argc, argv, &conf);
- bool DEBUG= false;
- const int max_options = conf["top_e_given_f"].as<size_t>();
- ifstream alignment (conf["aligned_corpus"].as<string>().c_str());
- istream& unscored_grammar = cin;
- ostream& scored_grammar = cout;
-
- //create lexical translation table
- cerr << "Creating table..." << endl;
- char* buf = new char[MAX_LINE_LENGTH];
-
- LexTranslationTable table;
-
- while(!alignment.eof())
- {
- alignment.getline(buf, MAX_LINE_LENGTH);
- if (buf[0] == 0) continue;
-
- table.createTTable(buf);
- }
-
- bool PRINT_TABLE=false;
- if (PRINT_TABLE)
- {
- ofstream trans_table;
- trans_table.open("lex_trans_table.out");
- for(map < pair<WordID,WordID>,int >::iterator it = table.word_translation.begin(); it != table.word_translation.end(); ++it)
- {
- trans_table << TD::Convert(it->first.first) << "|||" << TD::Convert(it->first.second) << "==" << it->second << "//" << table.total_foreign[it->first.first] << "//" << table.total_english[it->first.second] << endl;
- }
-
- trans_table.close();
- }
-
-
- //score unscored grammar
- cerr <<"Scoring grammar..." << endl;
-
- ID2RuleStatistics acc, cur_counts;
- vector<WordID> key, cur_key,temp_key;
- vector< pair<short,short> > al;
- vector< pair<short,short> >::iterator ita;
- int line = 0;
-
- static const int kCF = FD::Convert("CF");
- static const int kCE = FD::Convert("CE");
- static const int kCFE = FD::Convert("CFE");
-
- multimap<float, string> options;
- while(!unscored_grammar.eof())
- {
- ++line;
- options.clear();
- unscored_grammar.getline(buf, MAX_LINE_LENGTH);
- if (buf[0] == 0) continue;
- ParseLine(buf, &cur_key, &cur_counts);
- //loop over all the Target side phrases that this source aligns to
- for (ID2RuleStatistics::const_iterator it = cur_counts.begin(); it != cur_counts.end(); ++it)
- {
-
- /*Compute phrase translation prob.
- Print out scores in this format:
- Phrase translation prob P(F|E)
- Phrase translation prob P(E|F)
- Lexical weighting prob lex(F|E)
- Lexical weighting prob lex(E|F)
- */
-
- float pEF_ = it->second.counts.value(kCFE) / it->second.counts.value(kCF);
- float pFE_ = it->second.counts.value(kCFE) / it->second.counts.value(kCE);
-
- map <WordID, pair<int, float> > foreign_aligned;
- map <WordID, pair<int, float> > english_aligned;
-
- //Loop over all the alignment points to compute lexical translation probability
- al = it->second.aligns;
- for(ita = al.begin(); ita != al.end(); ++ita)
- {
-
- if (DEBUG)
- {
- cerr << "\nA:" << ita->first << "," << ita->second << "::";
- cerr << TD::Convert(cur_key[ita->first + 2]) << "-" << TD::Convert(it->first[ita->second]);
- }
-
-
- //Lookup this alignment probability in the table
- int temp = table.word_translation[pair<WordID,WordID> (cur_key[ita->first+2],it->first[ita->second])];
- float f2e=0, e2f=0;
- if ( table.total_foreign[cur_key[ita->first+2]] != 0)
- f2e = (float) temp / table.total_foreign[cur_key[ita->first+2]];
- if ( table.total_english[it->first[ita->second]] !=0 )
- e2f = (float) temp / table.total_english[it->first[ita->second]];
- if (DEBUG) printf (" %d %E %E\n", temp, f2e, e2f);
-
-
- //local counts to keep track of which things haven't been aligned, to later compute their null alignment
- if (foreign_aligned.count(cur_key[ita->first+2]))
- {
- foreign_aligned[ cur_key[ita->first+2] ].first++;
- foreign_aligned[ cur_key[ita->first+2] ].second += e2f;
- }
- else
- foreign_aligned [ cur_key[ita->first+2] ] = pair<int,float> (1,e2f);
-
-
-
- if (english_aligned.count( it->first[ ita->second] ))
- {
- english_aligned[ it->first[ ita->second ]].first++;
- english_aligned[ it->first[ ita->second] ].second += f2e;
- }
- else
- english_aligned [ it->first[ ita->second] ] = pair<int,float> (1,f2e);
-
-
-
-
- }
-
- float final_lex_f2e=1, final_lex_e2f=1;
- static const WordID NULL_ = TD::Convert("NULL");
-
- //compute lexical weight P(F|E) and include unaligned foreign words
- for(int i=0;i<cur_key.size(); i++)
- {
-
- if (!table.total_foreign.count(cur_key[i])) continue; //if we dont have it in the translation table, we won't know its lexical weight
-
- if (foreign_aligned.count(cur_key[i]))
- {
- pair<int, float> temp_lex_prob = foreign_aligned[cur_key[i]];
- final_lex_e2f *= temp_lex_prob.second / temp_lex_prob.first;
- }
- else //dealing with null alignment
- {
- int temp_count = table.word_translation[pair<WordID,WordID> (cur_key[i],NULL_)];
- float temp_e2f = (float) temp_count / table.total_english[NULL_];
- final_lex_e2f *= temp_e2f;
- }
-
- }
-
- //compute P(E|F) unaligned english words
- for(int j=0; j< it->first.size(); j++)
- {
- if (!table.total_english.count(it->first[j])) continue;
-
- if (english_aligned.count(it->first[j]))
- {
- pair<int, float> temp_lex_prob = english_aligned[it->first[j]];
- final_lex_f2e *= temp_lex_prob.second / temp_lex_prob.first;
- }
- else //dealing with null
- {
- int temp_count = table.word_translation[pair<WordID,WordID> (NULL_,it->first[j])];
- float temp_f2e = (float) temp_count / table.total_foreign[NULL_];
- final_lex_f2e *= temp_f2e;
- }
- }
-
- ostringstream os;
- os << TD::GetString(cur_key)
- << ' ' << TD::GetString(it->first) << " |||"
- << " FGivenE=" << safenlog(pFE_) << " EGivenF=" << safenlog(pEF_)
- << " LexE2F=" << safenlog(final_lex_e2f) << " LexF2E=" << safenlog(final_lex_f2e) << endl;
- options.insert(pair<float,string>(-pEF_, os.str()));
- }
- int ocount = 0;
- for (multimap<float,string>::iterator it = options.begin(); it != options.end(); ++it) {
- scored_grammar << it->second;
- ++ocount;
- if (ocount == max_options) break;
- }
- }
-}
-
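score_grammar.cc's inline computation is what became LexProbExtractor above. The lexical weight multiplies one factor per word: the average alignment probability over that word's alignment points, with unaligned words falling back to NULL counts, and safenlog caps the cost at 100. A worked standalone example with made-up probabilities (not from any corpus):

#include <cmath>
#include <cstdio>

// safenlog exactly as in the diff: -log(v), clamped at 100.
inline float safenlog(float v) {
  if (v == 1.0f) return 0.0f;
  float res = -std::log(v);
  if (res > 100.0f) res = 100.0f;
  return res;
}

// Worked example of the LexF2E factor for a rule with two target words,
// using made-up alignment probabilities (not from any real corpus): each
// aligned word contributes the average of P(e|f) over its alignment points,
// and the per-word factors multiply.
int main() {
  float w0 = (0.5f + 0.3f) / 2.0f;  // target word 0: aligned twice
  float w1 = 0.25f;                 // target word 1: aligned once
  float final_lex_f2e = w0 * w1;    // 0.4 * 0.25 = 0.1
  std::printf("lex(e|f)=%g  LexF2E=%g\n",
              final_lex_f2e, safenlog(final_lex_f2e));
  // prints: lex(e|f)=0.1  LexF2E=2.30259
}

The clamp matters because a word pair absent from the aligned corpus contributes a zero probability, which would push -log to infinity; capping the cost at 100 keeps such rules scoreable instead of discarding them.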
diff --git a/gi/pipeline/filter-for-test-set.pl b/gi/pipeline/filter-for-test-set.pl
deleted file mode 100755
index 1747c603..00000000
--- a/gi/pipeline/filter-for-test-set.pl
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/perl -w
-use strict;
-my $SCRIPT_DIR; BEGIN { use Cwd qw/ abs_path cwd /; use File::Basename; $SCRIPT_DIR = dirname(abs_path($0)); push @INC, $SCRIPT_DIR; }
-
-my $GZIP = 'gzip';
-my $ZCAT = 'gunzip -c';
-
-my $EXTOOLS = "$SCRIPT_DIR/../../extools";
-die "Can't find extools: $EXTOOLS" unless -e $EXTOOLS && -d $EXTOOLS;
-
-my $FILTER = "$EXTOOLS/filter_grammar";
-my $SCORE = "$EXTOOLS/score_grammar";
-
-assert_exec($FILTER, $SCORE);
-
-usage() unless scalar @ARGV == 3;
-my $corpus = $ARGV[0];
-my $grammar = $ARGV[1];
-my $testset = $ARGV[2];
-die "Can't find corpus: $corpus" unless -f $corpus;
-die "Can't find corpus: $grammar" unless -f $grammar;
-die "Can't find corpus: $testset" unless -f $testset;
-print STDERR " CORPUS: $corpus\n";
-print STDERR " GRAMMAR: $corpus\n";
-print STDERR "TEST SET: $corpus\n";
-print STDERR "Extracting...\n";
-
-safesystem("$ZCAT $grammar | $FILTER $testset | $SCORE -c $corpus") or die "Failed";
-
-sub usage {
- print <<EOT;
-
-Usage: $0 corpus.src_trg_al grammar.gz test-set.txt > filtered-grammar.scfg.txt
-
-Filter and score a grammar for a test set.
-
-EOT
- exit 1;
-};
-
-sub assert_exec {
- my @files = @_;
- for my $file (@files) {
- die "Can't find $file - did you run make?\n" unless -e $file;
- die "Can't execute $file" unless -e $file;
- }
-};
-
-sub safesystem {
- print STDERR "Executing: @_\n";
- system(@_);
- if ($? == -1) {
- print STDERR "ERROR: Failed to execute: @_\n $!\n";
- exit(1);
- }
- elsif ($? & 127) {
- printf STDERR "ERROR: Execution of: @_\n died with signal %d, %s coredump\n",
- ($? & 127), ($? & 128) ? 'with' : 'without';
- exit(1);
- }
- else {
- my $exitcode = $? >> 8;
- print STDERR "Exit code: $exitcode\n" if $exitcode;
- return ! $exitcode;
- }
-}
-
-
diff --git a/gi/pipeline/local-gi-pipeline.pl b/gi/pipeline/local-gi-pipeline.pl
index e66ca602..acd6b94c 100755
--- a/gi/pipeline/local-gi-pipeline.pl
+++ b/gi/pipeline/local-gi-pipeline.pl
@@ -28,11 +28,9 @@ my $PYP_TOPICS_TRAIN="$PYPTOOLS/pyp-contexts-train";
my $SORT_KEYS = "$SCRIPT_DIR/scripts/sort-by-key.sh";
my $EXTRACTOR = "$EXTOOLS/extractor";
-my $FILTER = "$EXTOOLS/filter_grammar";
-my $SCORER = "$EXTOOLS/score_grammar";
my $TOPIC_TRAIN = "$PYPTOOLS/pyp-contexts-train";
-assert_exec($SORT_KEYS, $REDUCER, $EXTRACTOR, $FILTER, $SCORER, $PYP_TOPICS_TRAIN, $S2L, $C2D, $TOPIC_TRAIN);
+assert_exec($SORT_KEYS, $REDUCER, $EXTRACTOR, $PYP_TOPICS_TRAIN, $S2L, $C2D, $TOPIC_TRAIN);
my $OUTPUT = './giwork';
@@ -67,7 +65,7 @@ if ($BIDIR) {
$res = grammar_extract();
}
print STDERR "\n!!!COMPLETE!!!\n";
-print STDERR "GRAMMAR: $res\n\nYou should probably run:\n\n $SCRIPT_DIR/filter-for-test-set.pl $CORPUS $res TESTSET.TXT > filtered-grammar.scfg\n\n";
+print STDERR "GRAMMAR: $res\n\nYou should probably run:\n\n zcat $res | $SCRIPT_DIR/../../extools/filter_score_grammar -c $CORPUS -t TESTSET.TXT > filtered-grammar.scfg\n\n";
exit 0;
sub context_dir {