From c398cef915ea7037c91066b6bfc19d915cac498b Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Tue, 10 Sep 2013 18:20:16 +0200
Subject: add simple per-coordinate learning rate (pclr) option to dtrain
---
training/dtrain/dtrain.cc | 25 ++++++++++++++++++++-----
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 0ee2f124..34c0a54a 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -40,6 +40,7 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
("scale_bleu_diff", po::value()->zero_tokens(), "learning rate <- bleu diff of a misranked pair")
("loss_margin", po::value()->default_value(0.), "update if no error in pref pair but model scores this near")
("max_pairs", po::value()->default_value(std::numeric_limits::max()), "max. # of pairs per Sent.")
+ ("pclr", po::value()->zero_tokens(), "use a (simple) per-coordinate learning rate")
("noup", po::value()->zero_tokens(), "do not update weights");
po::options_description cl("Command Line Options");
cl.add_options()
@@ -124,6 +125,8 @@ main(int argc, char** argv)
if (loss_margin > 9998.) loss_margin = std::numeric_limits::max();
bool scale_bleu_diff = false;
if (cfg.count("scale_bleu_diff")) scale_bleu_diff = true;
+ bool pclr = false;
+ if (cfg.count("pclr")) pclr = true;
bool average = false;
if (select_weights == "avg")
average = true;
@@ -131,7 +134,6 @@ main(int argc, char** argv)
if (cfg.count("print_weights"))
boost::split(print_weights, cfg["print_weights"].as(), boost::is_any_of(" "));
-
// setup decoder
register_feature_functions();
SetSilent(true);
@@ -249,6 +251,8 @@ main(int argc, char** argv)
cerr << setw(25) << "l1 reg " << l1_reg << " '" << cfg["l1_reg"].as() << "'" << endl;
if (rescale)
cerr << setw(25) << "rescale " << rescale << endl;
+ if (pclr)
+ cerr << setw(25) << "pclr " << pclr << endl;
cerr << setw(25) << "max pairs " << max_pairs << endl;
cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as() << "'" << endl;
cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
@@ -261,6 +265,8 @@ main(int argc, char** argv)
if (!verbose) cerr << "(a dot represents " << DTRAIN_DOTS << " inputs)" << endl;
}
+ // pclr
+ SparseVector learning_rates;
for (unsigned t = 0; t < T; t++) // T epochs
{
@@ -385,7 +391,16 @@ main(int argc, char** argv)
if (scale_bleu_diff) eta = it->first.score - it->second.score;
if (rank_error || margin < loss_margin) {
SparseVector diff_vec = it->first.f - it->second.f;
- lambdas.plus_eq_v_times_s(diff_vec, eta);
+ if (pclr) {
+ SparseVector::iterator jt = diff_vec.begin();
+ for (; jt != diff_vec.end(); ++it) {
+ jt->second *= max(0.0000001, eta/(eta+learning_rates[jt->first])); // FIXME
+ learning_rates[jt->first]++;
+ }
+ lambdas += diff_vec;
+ } else {
+ lambdas.plus_eq_v_times_s(diff_vec, eta);
+ }
if (gamma)
lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs));
}
@@ -395,14 +410,14 @@ main(int argc, char** argv)
// please note that this regularizations happen
// after a _sentence_ -- not after each example/pair!
if (l1naive) {
- FastSparseVector::iterator it = lambdas.begin();
+ SparseVector::iterator it = lambdas.begin();
for (; it != lambdas.end(); ++it) {
if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
it->second -= sign(it->second) * l1_reg;
}
}
} else if (l1clip) {
- FastSparseVector::iterator it = lambdas.begin();
+ SparseVector::iterator it = lambdas.begin();
for (; it != lambdas.end(); ++it) {
if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
if (it->second != 0) {
@@ -417,7 +432,7 @@ main(int argc, char** argv)
}
} else if (l1cumul) {
weight_t acc_penalty = (ii+1) * l1_reg; // ii is the index of the current input
- FastSparseVector::iterator it = lambdas.begin();
+ SparseVector::iterator it = lambdas.begin();
for (; it != lambdas.end(); ++it) {
if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
if (it->second != 0) {
--
cgit v1.2.3
From 451d0c7e865cdea9da6a0fb747782886b49eeeef Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Tue, 10 Sep 2013 19:54:40 +0200
Subject: apply pclr update once per sentence (accumulate pair diffs in sum_up) instead of per pair
---
training/dtrain/dtrain.cc | 36 ++++++++++++++++++++--------
training/dtrain/examples/standard/dtrain.ini | 3 ++-
2 files changed, 28 insertions(+), 11 deletions(-)
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 34c0a54a..2d090666 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -372,7 +372,8 @@ main(int argc, char** argv)
PROsampling(samples, pairs, pair_threshold, max_pairs);
npairs += pairs.size();
- SparseVector lambdas_copy;
+ SparseVector lambdas_copy; // for l1 regularization
+ SparseVector sum_up; // for pclr
if (l1naive||l1clip||l1cumul) lambdas_copy = lambdas;
for (vector >::iterator it = pairs.begin();
@@ -392,20 +393,24 @@ main(int argc, char** argv)
if (rank_error || margin < loss_margin) {
SparseVector diff_vec = it->first.f - it->second.f;
if (pclr) {
- SparseVector::iterator jt = diff_vec.begin();
- for (; jt != diff_vec.end(); ++it) {
- jt->second *= max(0.0000001, eta/(eta+learning_rates[jt->first])); // FIXME
- learning_rates[jt->first]++;
- }
- lambdas += diff_vec;
- } else {
- lambdas.plus_eq_v_times_s(diff_vec, eta);
- }
+ sum_up += diff_vec;
+ } else {
+ lambdas.plus_eq_v_times_s(diff_vec, eta);
+ }
if (gamma)
lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs));
}
}
+ // per-coordinate learning rate
+ if (pclr) {
+ SparseVector::iterator it = sum_up.begin();
+ for (; it != lambdas.end(); ++it) {
+ lambdas[it->first] += it->second * max(0.00000001, eta/(eta+learning_rates[it->first]));
+ learning_rates[it->first]++;
+ }
+ }
+
// l1 regularization
// please note that this regularizations happen
// after a _sentence_ -- not after each example/pair!
@@ -413,6 +418,8 @@ main(int argc, char** argv)
SparseVector::iterator it = lambdas.begin();
for (; it != lambdas.end(); ++it) {
if (!lambdas_copy.get(it->first) || lambdas_copy.get(it->first)!=it->second) {
+ it->second *= max(0.0000001, eta/(eta+learning_rates[it->first])); // FIXME
+ learning_rates[it->first]++;
it->second -= sign(it->second) * l1_reg;
}
}
@@ -530,6 +537,15 @@ main(int argc, char** argv)
Weights::WriteToFile(w_fn, dense_weights, true);
}
+ WriteFile of("-");
+ ostream& o = *of.stream();
+ o << "<<<<<<<<<<<<<<<<<<<<<<<<\n";
+ for (SparseVector::iterator it = learning_rates.begin(); it != learning_rates.end(); ++it) {
+ if (it->second == 0) continue;
+ o << FD::Convert(it->first) << '\t' << it->second << endl;
+ }
+ o << ">>>>>>>>>>>>>>>>>>>>>>>>>\n";
+
} // outer loop
if (average) w_average /= (weight_t)T;
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 23e94285..07350a0b 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,6 +1,6 @@
input=./nc-wmt11.de.gz
refs=./nc-wmt11.en.gz
-output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
+output=asdf # a weights file (add .gz for gzip compression) or STDOUT '-'
select_weights=VOID # output average (over epochs) weight vector
decoder_config=./cdec.ini # config for cdec
# weights for these features will be printed on each iteration
@@ -22,3 +22,4 @@ pair_sampling=XYX #
hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
pair_threshold=0 # minimum distance in BLEU (here: > 0)
loss_margin=0 # update if correctly ranked, but within this margin
+pclr=1
--
cgit v1.2.3
From 04784fd692c58e9bb45708b07f33831c9888903b Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Tue, 10 Sep 2013 20:03:22 +0200
Subject: rm debug stuff
---
training/dtrain/dtrain.cc | 9 ---------
training/dtrain/examples/standard/dtrain.ini | 1 -
2 files changed, 10 deletions(-)
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 2d090666..5dfd6286 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -537,15 +537,6 @@ main(int argc, char** argv)
Weights::WriteToFile(w_fn, dense_weights, true);
}
- WriteFile of("-");
- ostream& o = *of.stream();
- o << "<<<<<<<<<<<<<<<<<<<<<<<<\n";
- for (SparseVector::iterator it = learning_rates.begin(); it != learning_rates.end(); ++it) {
- if (it->second == 0) continue;
- o << FD::Convert(it->first) << '\t' << it->second << endl;
- }
- o << ">>>>>>>>>>>>>>>>>>>>>>>>>\n";
-
} // outer loop
if (average) w_average /= (weight_t)T;
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index 07350a0b..c0912a62 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -22,4 +22,3 @@ pair_sampling=XYX #
hi_lo=0.1 # 10 vs 80 vs 10 and 80 vs 10 here
pair_threshold=0 # minimum distance in BLEU (here: > 0)
loss_margin=0 # update if correctly ranked, but within this margin
-pclr=1
--
cgit v1.2.3
From b2891df4bc4429fbeec503279fd19e7fafe04a24 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Tue, 17 Sep 2013 21:18:27 +0200
Subject: separate inis for shards
---
training/dtrain/dtrain.cc | 2 +-
training/dtrain/parallelize.rb | 13 ++++++++++++-
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 2d090666..4521e794 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -406,8 +406,8 @@ main(int argc, char** argv)
if (pclr) {
SparseVector::iterator it = sum_up.begin();
for (; it != lambdas.end(); ++it) {
- lambdas[it->first] += it->second * max(0.00000001, eta/(eta+learning_rates[it->first]));
learning_rates[it->first]++;
+ lambdas[it->first] += it->second / learning_rates[it->first]; //* max(0.00000001, eta/(eta+learning_rates[it->first]));
}
}
diff --git a/training/dtrain/parallelize.rb b/training/dtrain/parallelize.rb
index 285f3c9b..66a61b3d 100755
--- a/training/dtrain/parallelize.rb
+++ b/training/dtrain/parallelize.rb
@@ -21,6 +21,7 @@ opts = Trollop::options do
opt :qsub, "use qsub", :type => :bool, :default => false
opt :dtrain_binary, "path to dtrain binary", :type => :string
opt :extra_qsub, "extra qsub args", :type => :string, :default => ""
+ opt :per_shard_decoder_configs, "give special decoder config per shard", :type => string
end
usage if not opts[:config]&&opts[:shards]&&opts[:input]&&opts[:references]
@@ -41,9 +42,11 @@ epochs = opts[:epochs]
rand = opts[:randomize]
reshard = opts[:reshard]
predefined_shards = false
+per_shard_decoder_configs = false
if opts[:shards] == 0
predefined_shards = true
num_shards = 0
+ per_shard_decoder_configs = true if opts[:per_shard_decoder_configs]
else
num_shards = opts[:shards]
end
@@ -101,6 +104,9 @@ refs_files = []
if predefined_shards
input_files = File.new(input).readlines.map {|i| i.strip }
refs_files = File.new(refs).readlines.map {|i| i.strip }
+ if per_shard_decoder_configs
+ decoder_configs = File.new(opts[:per_shard_decoder_configs]).readlines.map {|i| i.strip}
+ end
num_shards = input_files.size
else
input_files, refs_files = make_shards input, refs, num_shards, 0, rand
@@ -126,8 +132,13 @@ end
else
local_end = "2>work/out.#{shard}.#{epoch}"
end
+ if per_shard_decoder_configs
+ cdec_cfg = "--decoder_config #{decoder_configs[shard]}"
+ else
+ cdec_cfg = ""
+ end
pids << Kernel.fork {
- `#{qsub_str_start}#{dtrain_bin} -c #{ini}\
+ `#{qsub_str_start}#{dtrain_bin} -c #{ini} #{cdec_cfg}\
--input #{input_files[shard]}\
--refs #{refs_files[shard]} #{input_weights}\
--output work/weights.#{shard}.#{epoch}#{qsub_str_end} #{local_end}`
--
cgit v1.2.3
From 2e746d6ad25aaf4d85f9c8f277ff109e45bfd93e Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Fri, 20 Sep 2013 20:01:03 +0200
Subject: leave-one-out (loo) grammar extraction: blacklist the current sentence id during sampling
---
extractor/grammar_extractor.cc | 6 ++++--
extractor/grammar_extractor.h | 3 ++-
extractor/rule_factory.cc | 5 +++--
extractor/rule_factory.h | 3 ++-
extractor/run_extractor.cc | 13 +++++++++++--
extractor/sample_alignment.txt | 3 +++
extractor/sample_bitext.txt | 3 +++
extractor/sampler.cc | 35 ++++++++++++++++++++++++++++++-----
extractor/sampler.h | 5 ++++-
9 files changed, 62 insertions(+), 14 deletions(-)
diff --git a/extractor/grammar_extractor.cc b/extractor/grammar_extractor.cc
index 8050ce7b..1fbdee5b 100644
--- a/extractor/grammar_extractor.cc
+++ b/extractor/grammar_extractor.cc
@@ -3,11 +3,13 @@
#include
#include
#include
+#include
#include "grammar.h"
#include "rule.h"
#include "rule_factory.h"
#include "vocabulary.h"
+#include "data_array.h"
using namespace std;
@@ -32,10 +34,10 @@ GrammarExtractor::GrammarExtractor(
vocabulary(vocabulary),
rule_factory(rule_factory) {}
-Grammar GrammarExtractor::GetGrammar(const string& sentence) {
+Grammar GrammarExtractor::GetGrammar(const string& sentence, const unordered_set blacklisted_sentence_ids, const shared_ptr source_data_array) {
vector words = TokenizeSentence(sentence);
vector word_ids = AnnotateWords(words);
- return rule_factory->GetGrammar(word_ids);
+ return rule_factory->GetGrammar(word_ids, blacklisted_sentence_ids, source_data_array);
}
vector GrammarExtractor::TokenizeSentence(const string& sentence) {
diff --git a/extractor/grammar_extractor.h b/extractor/grammar_extractor.h
index b36ceeb9..6c0aafbf 100644
--- a/extractor/grammar_extractor.h
+++ b/extractor/grammar_extractor.h
@@ -4,6 +4,7 @@
#include
#include
#include
+#include
using namespace std;
@@ -44,7 +45,7 @@ class GrammarExtractor {
// Converts the sentence to a vector of word ids and uses the RuleFactory to
// extract the SCFG rules which may be used to decode the sentence.
- Grammar GetGrammar(const string& sentence);
+ Grammar GetGrammar(const string& sentence, const unordered_set blacklisted_sentence_ids, const shared_ptr source_data_array);
private:
// Splits the sentence in a vector of words.
diff --git a/extractor/rule_factory.cc b/extractor/rule_factory.cc
index 8c30fb9e..e52019ae 100644
--- a/extractor/rule_factory.cc
+++ b/extractor/rule_factory.cc
@@ -17,6 +17,7 @@
#include "suffix_array.h"
#include "time_util.h"
#include "vocabulary.h"
+#include "data_array.h"
using namespace std;
using namespace chrono;
@@ -100,7 +101,7 @@ HieroCachingRuleFactory::HieroCachingRuleFactory() {}
HieroCachingRuleFactory::~HieroCachingRuleFactory() {}
-Grammar HieroCachingRuleFactory::GetGrammar(const vector& word_ids) {
+Grammar HieroCachingRuleFactory::GetGrammar(const vector& word_ids, const unordered_set blacklisted_sentence_ids, const shared_ptr source_data_array) {
Clock::time_point start_time = Clock::now();
double total_extract_time = 0;
double total_intersect_time = 0;
@@ -192,7 +193,7 @@ Grammar HieroCachingRuleFactory::GetGrammar(const vector& word_ids) {
Clock::time_point extract_start = Clock::now();
if (!state.starts_with_x) {
// Extract rules for the sampled set of occurrences.
- PhraseLocation sample = sampler->Sample(next_node->matchings);
+ PhraseLocation sample = sampler->Sample(next_node->matchings, blacklisted_sentence_ids, source_data_array);
vector new_rules =
rule_extractor->ExtractRules(next_phrase, sample);
rules.insert(rules.end(), new_rules.begin(), new_rules.end());
diff --git a/extractor/rule_factory.h b/extractor/rule_factory.h
index 52e8712a..c7332720 100644
--- a/extractor/rule_factory.h
+++ b/extractor/rule_factory.h
@@ -3,6 +3,7 @@
#include
#include
+#include
#include "matchings_trie.h"
@@ -71,7 +72,7 @@ class HieroCachingRuleFactory {
// Constructs SCFG rules for a given sentence.
// (See class description for more details.)
- virtual Grammar GetGrammar(const vector& word_ids);
+ virtual Grammar GetGrammar(const vector& word_ids, const unordered_set blacklisted_sentence_ids, const shared_ptr source_data_array);
protected:
HieroCachingRuleFactory();
diff --git a/extractor/run_extractor.cc b/extractor/run_extractor.cc
index 8a9ca89d..6eb55073 100644
--- a/extractor/run_extractor.cc
+++ b/extractor/run_extractor.cc
@@ -75,7 +75,9 @@ int main(int argc, char** argv) {
("max_samples", po::value()->default_value(300),
"Maximum number of samples")
("tight_phrases", po::value()->default_value(true),
- "False if phrases may be loose (better, but slower)");
+ "False if phrases may be loose (better, but slower)")
+ ("leave_one_out", po::value()->zero_tokens(),
+ "do leave-one-out estimation of grammars (e.g. for extracting grammars for the training set");
po::variables_map vm;
po::store(po::parse_command_line(argc, argv, desc), vm);
@@ -96,6 +98,11 @@ int main(int argc, char** argv) {
return 1;
}
+ bool leave_one_out = false;
+ if (vm.count("leave_one_out")) {
+ leave_one_out = true;
+ }
+
int num_threads = vm["threads"].as();
cerr << "Grammar extraction will use " << num_threads << " threads." << endl;
@@ -223,7 +230,9 @@ int main(int argc, char** argv) {
}
suffixes[i] = suffix;
- Grammar grammar = extractor.GetGrammar(sentences[i]);
+ unordered_set blacklisted_sentence_ids;
+ if (leave_one_out) blacklisted_sentence_ids.insert(i);
+ Grammar grammar = extractor.GetGrammar(sentences[i], blacklisted_sentence_ids, source_data_array);
ofstream output(GetGrammarFilePath(grammar_path, i).c_str());
output << grammar;
}
diff --git a/extractor/sample_alignment.txt b/extractor/sample_alignment.txt
index 80b446a4..f0292b01 100644
--- a/extractor/sample_alignment.txt
+++ b/extractor/sample_alignment.txt
@@ -1,2 +1,5 @@
0-0 1-1 2-2
1-0 2-1
+0-0
+0-0 1-1
+0-0 1-1
diff --git a/extractor/sample_bitext.txt b/extractor/sample_bitext.txt
index 93d6b39d..2b7c8e40 100644
--- a/extractor/sample_bitext.txt
+++ b/extractor/sample_bitext.txt
@@ -1,2 +1,5 @@
+asdf ||| dontseeme
+qqq asdf ||| zzz fdsa
+asdf qqq ||| fdsa zzz
ana are mere . ||| anna has apples .
ana bea mult lapte . ||| anna drinks a lot of milk .
diff --git a/extractor/sampler.cc b/extractor/sampler.cc
index d81956b5..2f7738db 100644
--- a/extractor/sampler.cc
+++ b/extractor/sampler.cc
@@ -12,18 +12,43 @@ Sampler::Sampler() {}
Sampler::~Sampler() {}
-PhraseLocation Sampler::Sample(const PhraseLocation& location) const {
+PhraseLocation Sampler::Sample(const PhraseLocation& location, unordered_set blacklisted_sentence_ids, const shared_ptr source_data_array) const {
vector sample;
int num_subpatterns;
if (location.matchings == NULL) {
// Sample suffix array range.
num_subpatterns = 1;
int low = location.sa_low, high = location.sa_high;
- double step = max(1.0, (double) (high - low) / max_samples);
- for (double i = low; i < high && sample.size() < max_samples; i += step) {
- sample.push_back(suffix_array->GetSuffix(Round(i)));
+ double step = Round(max(1.0, (double) (high - low) / max_samples));
+ int i = location.sa_low;
+ bool found = false;
+ while (sample.size() < max_samples && i <= location.sa_high) {
+ int x = suffix_array->GetSuffix(i);
+ int id = source_data_array->GetSentenceId(x);
+ if (find(blacklisted_sentence_ids.begin(), blacklisted_sentence_ids.end(), id) != blacklisted_sentence_ids.end()) {
+ int backoff_step = 1;
+ while (true) {
+ int j = i - backoff_step;
+ x = suffix_array->GetSuffix(j);
+ id = source_data_array->GetSentenceId(x);
+ if ((j >= location.sa_low) && (find(blacklisted_sentence_ids.begin(), blacklisted_sentence_ids.end(), id) == blacklisted_sentence_ids.end())
+ && (find(sample.begin(), sample.end(), x) == sample.end())) { found = true; break; }
+ int k = i + backoff_step;
+ x = suffix_array->GetSuffix(k);
+ id = source_data_array->GetSentenceId(x);
+ if ((k <= location.sa_high) && (find(blacklisted_sentence_ids.begin(), blacklisted_sentence_ids.end(), id) == blacklisted_sentence_ids.end())
+ && (find(sample.begin(), sample.end(), x) == sample.end())) { found = true; break; }
+ if (j <= location.sa_low && k >= location.sa_high) break;
+ backoff_step++;
+ }
+ } else {
+ found = true;
+ }
+ if (found && (find(sample.begin(), sample.end(), x) == sample.end())) sample.push_back(x);
+ i += step;
+ found = false;
}
- } else {
+ } else { // when do we get here?
// Sample vector of occurrences.
num_subpatterns = location.num_subpatterns;
int num_matchings = location.matchings->size() / num_subpatterns;
diff --git a/extractor/sampler.h b/extractor/sampler.h
index be4aa1bb..30e747fd 100644
--- a/extractor/sampler.h
+++ b/extractor/sampler.h
@@ -2,6 +2,9 @@
#define _SAMPLER_H_
#include
+#include
+
+#include "data_array.h"
using namespace std;
@@ -20,7 +23,7 @@ class Sampler {
virtual ~Sampler();
// Samples uniformly at most max_samples phrase occurrences.
- virtual PhraseLocation Sample(const PhraseLocation& location) const;
+ virtual PhraseLocation Sample(const PhraseLocation& location, const unordered_set blacklisted_sentence_ids, const shared_ptr source_data_array) const;
protected:
Sampler();
--
cgit v1.2.3
From 0087a3d427cbe0cfb20548a496124ce7d857da8f Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Fri, 20 Sep 2013 20:10:19 +0200
Subject: example file
---
extractor/sample_source.txt | 5 +++++
1 file changed, 5 insertions(+)
create mode 100644 extractor/sample_source.txt
diff --git a/extractor/sample_source.txt b/extractor/sample_source.txt
new file mode 100644
index 00000000..9b46dd6a
--- /dev/null
+++ b/extractor/sample_source.txt
@@ -0,0 +1,5 @@
+asdf
+qqq asdf
+asdf qqq
+ana are mere .
+ana bea mult lapte .
--
cgit v1.2.3
From a08465b90027cc6f1d17daae2992e67368eeedee Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Tue, 24 Sep 2013 15:50:03 +0200
Subject: loo #2
---
extractor/sampler.cc | 26 +++++++++++++++-----------
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/extractor/sampler.cc b/extractor/sampler.cc
index 2f7738db..cb470962 100644
--- a/extractor/sampler.cc
+++ b/extractor/sampler.cc
@@ -20,35 +20,39 @@ PhraseLocation Sampler::Sample(const PhraseLocation& location, unordered_setGetSuffix(i);
int id = source_data_array->GetSentenceId(x);
if (find(blacklisted_sentence_ids.begin(), blacklisted_sentence_ids.end(), id) != blacklisted_sentence_ids.end()) {
+ found = false;
int backoff_step = 1;
while (true) {
+ if ((double)backoff_step >= step) break;
int j = i - backoff_step;
x = suffix_array->GetSuffix(j);
id = source_data_array->GetSentenceId(x);
- if ((j >= location.sa_low) && (find(blacklisted_sentence_ids.begin(), blacklisted_sentence_ids.end(), id) == blacklisted_sentence_ids.end())
- && (find(sample.begin(), sample.end(), x) == sample.end())) { found = true; break; }
+ if (j > last && find(blacklisted_sentence_ids.begin(), blacklisted_sentence_ids.end(), id) == blacklisted_sentence_ids.end()) {
+ found = true; last = i; break;
+ }
int k = i + backoff_step;
x = suffix_array->GetSuffix(k);
id = source_data_array->GetSentenceId(x);
- if ((k <= location.sa_high) && (find(blacklisted_sentence_ids.begin(), blacklisted_sentence_ids.end(), id) == blacklisted_sentence_ids.end())
- && (find(sample.begin(), sample.end(), x) == sample.end())) { found = true; break; }
- if (j <= location.sa_low && k >= location.sa_high) break;
+ if (k < min(i+step, (double)high) && find(blacklisted_sentence_ids.begin(), blacklisted_sentence_ids.end(), id) == blacklisted_sentence_ids.end()) {
+ found = true; last = k; break;
+ }
+ if (j <= last && k >= high) break;
backoff_step++;
}
} else {
found = true;
+ last = i;
}
- if (found && (find(sample.begin(), sample.end(), x) == sample.end())) sample.push_back(x);
+ if (found) sample.push_back(x);
i += step;
- found = false;
}
- } else { // when do we get here?
+ } else {
// Sample vector of occurrences.
num_subpatterns = location.num_subpatterns;
int num_matchings = location.matchings->size() / num_subpatterns;
--
cgit v1.2.3
From e11bbf4e4afb3e90710e45eb5cc7dff89bb559bc Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Wed, 25 Sep 2013 19:27:44 +0200
Subject: fix per_shard_decoder_configs option in parallelize.rb (use :string symbol, add short flag)
---
training/dtrain/parallelize.rb | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/training/dtrain/parallelize.rb b/training/dtrain/parallelize.rb
index 66a61b3d..2fc66cab 100755
--- a/training/dtrain/parallelize.rb
+++ b/training/dtrain/parallelize.rb
@@ -21,7 +21,7 @@ opts = Trollop::options do
opt :qsub, "use qsub", :type => :bool, :default => false
opt :dtrain_binary, "path to dtrain binary", :type => :string
opt :extra_qsub, "extra qsub args", :type => :string, :default => ""
- opt :per_shard_decoder_configs, "give special decoder config per shard", :type => string
+ opt :per_shard_decoder_configs, "give special decoder config per shard", :type => :string, :short => :o
end
usage if not opts[:config]&&opts[:shards]&&opts[:input]&&opts[:references]
--
cgit v1.2.3
From 5fc77937dde48dddde264261cb773b07fe7cd560 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Tue, 8 Oct 2013 13:57:45 +0200
Subject: dtrain: added pclr variants and new expected-output; fixed bug in
soft syntax features
---
decoder/ff_soft_syntax.cc | 2 +-
training/dtrain/dtrain.cc | 31 +++---
training/dtrain/examples/standard/dtrain.ini | 6 +-
training/dtrain/examples/standard/expected-output | 115 +++++++++++++---------
training/dtrain/parallelize.rb | 11 ++-
5 files changed, 101 insertions(+), 64 deletions(-)
diff --git a/decoder/ff_soft_syntax.cc b/decoder/ff_soft_syntax.cc
index 9981fa45..d84f2e6d 100644
--- a/decoder/ff_soft_syntax.cc
+++ b/decoder/ff_soft_syntax.cc
@@ -21,7 +21,7 @@ using namespace std;
struct SoftSyntacticFeaturesImpl {
SoftSyntacticFeaturesImpl(const string& param) {
vector labels = SplitOnWhitespace(param);
- for (unsigned int i = 0; i < labels.size(); i++)
+ //for (unsigned int i = 0; i < labels.size(); i++)
//cerr << "Labels: " << labels.at(i) << endl;
for (unsigned int i = 0; i < labels.size(); i++) {
string label = labels.at(i);
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 9d60a903..38a9b69a 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -40,7 +40,7 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
("scale_bleu_diff", po::value()->zero_tokens(), "learning rate <- bleu diff of a misranked pair")
("loss_margin", po::value()->default_value(0.), "update if no error in pref pair but model scores this near")
("max_pairs", po::value()->default_value(std::numeric_limits::max()), "max. # of pairs per Sent.")
- ("pclr", po::value()->zero_tokens(), "use a (simple) per-coordinate learning rate")
+ ("pclr", po::value()->default_value("no"), "use a (simple|adagrad) per-coordinate learning rate")
("noup", po::value()->zero_tokens(), "do not update weights");
po::options_description cl("Command Line Options");
cl.add_options()
@@ -125,8 +125,7 @@ main(int argc, char** argv)
if (loss_margin > 9998.) loss_margin = std::numeric_limits::max();
bool scale_bleu_diff = false;
if (cfg.count("scale_bleu_diff")) scale_bleu_diff = true;
- bool pclr = false;
- if (cfg.count("pclr")) pclr = true;
+ const string pclr = cfg["pclr"].as();
bool average = false;
if (select_weights == "avg")
average = true;
@@ -190,7 +189,6 @@ main(int argc, char** argv)
weight_t gamma = cfg["gamma"].as();
// faster perceptron: consider only misranked pairs, see
- // DO NOT ENABLE WITH SVM (gamma > 0) OR loss_margin!
bool faster_perceptron = false;
if (gamma==0 && loss_margin==0) faster_perceptron = true;
@@ -251,8 +249,7 @@ main(int argc, char** argv)
cerr << setw(25) << "l1 reg " << l1_reg << " '" << cfg["l1_reg"].as() << "'" << endl;
if (rescale)
cerr << setw(25) << "rescale " << rescale << endl;
- if (pclr)
- cerr << setw(25) << "pclr " << pclr << endl;
+ cerr << setw(25) << "pclr " << pclr << endl;
cerr << setw(25) << "max pairs " << max_pairs << endl;
cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as() << "'" << endl;
cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
@@ -392,22 +389,30 @@ main(int argc, char** argv)
if (scale_bleu_diff) eta = it->first.score - it->second.score;
if (rank_error || margin < loss_margin) {
SparseVector diff_vec = it->first.f - it->second.f;
- if (pclr) {
+ if (pclr != "no") {
sum_up += diff_vec;
} else {
lambdas.plus_eq_v_times_s(diff_vec, eta);
+ if (gamma) lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs)); // FIXME
}
- if (gamma)
- lambdas.plus_eq_v_times_s(lambdas, -2*gamma*eta*(1./npairs));
}
}
// per-coordinate learning rate
- if (pclr) {
+ if (pclr != "no") {
SparseVector::iterator it = sum_up.begin();
- for (; it != lambdas.end(); ++it) {
- learning_rates[it->first]++;
- lambdas[it->first] += it->second / learning_rates[it->first]; //* max(0.00000001, eta/(eta+learning_rates[it->first]));
+ for (; it != sum_up.end(); ++it) {
+ if (pclr == "simple") {
+ lambdas[it->first] += it->second / max(1.0, learning_rates[it->first]);
+ learning_rates[it->first]++;
+ } else if (pclr == "adagrad") {
+ if (learning_rates[it->first] == 0) {
+ lambdas[it->first] += it->second * eta;
+ } else {
+ lambdas[it->first] += it->second * eta * learning_rates[it->first];
+ }
+ learning_rates[it->first] += pow(it->second, 2.0);
+ }
}
}
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index c0912a62..e6d6382e 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,6 +1,6 @@
input=./nc-wmt11.de.gz
refs=./nc-wmt11.en.gz
-output=asdf # a weights file (add .gz for gzip compression) or STDOUT '-'
+output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
select_weights=VOID # output average (over epochs) weight vector
decoder_config=./cdec.ini # config for cdec
# weights for these features will be printed on each iteration
@@ -10,11 +10,11 @@ print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 Phr
stop_after=10 # stop epoch after 10 inputs
# interesting stuff
-epochs=2 # run over input 2 times
+epochs=3 # run over input 3 times
k=100 # use 100best lists
N=4 # optimize (approx) BLEU4
scorer=fixed_stupid_bleu # use 'stupid' BLEU+1
-learning_rate=1.0 # learning rate, don't care if gamma=0 (perceptron)
+learning_rate=1.0 # learning rate, don't care if gamma=0 (perceptron) and loss_margin=0 (not margin perceptron)
gamma=0 # use SVM reg
sample_from=kbest # use kbest lists (as opposed to forest)
filter=uniq # only unique entries in kbest (surface form)
diff --git a/training/dtrain/examples/standard/expected-output b/training/dtrain/examples/standard/expected-output
index 21f91244..a35bbe6f 100644
--- a/training/dtrain/examples/standard/expected-output
+++ b/training/dtrain/examples/standard/expected-output
@@ -4,13 +4,13 @@ Reading ./nc-wmt11.en.srilm.gz
----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
****************************************************************************************************
Example feature: Shape_S00000_T00000
-Seeding random number sequence to 970626287
+Seeding random number sequence to 4049211323
dtrain
Parameters:
k 100
N 4
- T 2
+ T 3
scorer 'fixed_stupid_bleu'
sample from 'kbest'
filter 'uniq'
@@ -23,6 +23,7 @@ Parameters:
pair threshold 0
select weights 'VOID'
l1 reg 0 'none'
+ pclr no
max pairs 4294967295
cdec cfg './cdec.ini'
input './nc-wmt11.de.gz'
@@ -30,62 +31,88 @@ Parameters:
output '-'
stop_after 10
(a dot represents 10 inputs)
-Iteration #1 of 2.
+Iteration #1 of 3.
. 10
Stopping after 10 input sentences.
WEIGHTS
- Glue = -614
- WordPenalty = +1256.8
- LanguageModel = +5610.5
- LanguageModel_OOV = -1449
- PhraseModel_0 = -2107
- PhraseModel_1 = -4666.1
- PhraseModel_2 = -2713.5
- PhraseModel_3 = +4204.3
- PhraseModel_4 = -1435.8
- PhraseModel_5 = +916
- PhraseModel_6 = +190
- PassThrough = -2527
+ Glue = -1100
+ WordPenalty = -82.082
+ LanguageModel = -3199.1
+ LanguageModel_OOV = -192
+ PhraseModel_0 = +3128.2
+ PhraseModel_1 = -1610.2
+ PhraseModel_2 = -4336.5
+ PhraseModel_3 = +2910.3
+ PhraseModel_4 = +2523.2
+ PhraseModel_5 = +506
+ PhraseModel_6 = +1467
+ PassThrough = -387
---
- 1best avg score: 0.17874 (+0.17874)
- 1best avg model score: 88399 (+88399)
- avg # pairs: 798.2 (meaningless)
- avg # rank err: 798.2
+ 1best avg score: 0.16966 (+0.16966)
+ 1best avg model score: 2.9874e+05 (+2.9874e+05)
+ avg # pairs: 906.3 (meaningless)
+ avg # rank err: 906.3
avg # margin viol: 0
- non0 feature count: 887
+ non0 feature count: 825
avg list sz: 91.3
- avg f count: 126.85
-(time 0.33 min, 2 s/S)
+ avg f count: 139.77
+(time 0.35 min, 2.1 s/S)
-Iteration #2 of 2.
+Iteration #2 of 3.
. 10
WEIGHTS
- Glue = -1025
- WordPenalty = +1751.5
- LanguageModel = +10059
- LanguageModel_OOV = -4490
- PhraseModel_0 = -2640.7
- PhraseModel_1 = -3757.4
- PhraseModel_2 = -1133.1
- PhraseModel_3 = +1837.3
- PhraseModel_4 = -3534.3
- PhraseModel_5 = +2308
- PhraseModel_6 = +1677
- PassThrough = -6222
+ Glue = -1221
+ WordPenalty = +836.89
+ LanguageModel = +2332.3
+ LanguageModel_OOV = -1451
+ PhraseModel_0 = +1507.2
+ PhraseModel_1 = -2728.4
+ PhraseModel_2 = -4183.6
+ PhraseModel_3 = +1816.3
+ PhraseModel_4 = -2894.7
+ PhraseModel_5 = +1403
+ PhraseModel_6 = +35
+ PassThrough = -1097
---
- 1best avg score: 0.30764 (+0.12891)
- 1best avg model score: -2.5042e+05 (-3.3882e+05)
- avg # pairs: 725.9 (meaningless)
- avg # rank err: 725.9
+ 1best avg score: 0.17399 (+0.004325)
+ 1best avg model score: 49369 (-2.4937e+05)
+ avg # pairs: 662.4 (meaningless)
+ avg # rank err: 662.4
avg # margin viol: 0
- non0 feature count: 1499
+ non0 feature count: 1235
avg list sz: 91.3
- avg f count: 114.34
-(time 0.32 min, 1.9 s/S)
+ avg f count: 125.11
+(time 0.27 min, 1.6 s/S)
+
+Iteration #3 of 3.
+ . 10
+WEIGHTS
+ Glue = -1574
+ WordPenalty = -17.372
+ LanguageModel = +6861.8
+ LanguageModel_OOV = -3997
+ PhraseModel_0 = -398.76
+ PhraseModel_1 = -3419.6
+ PhraseModel_2 = -3186.7
+ PhraseModel_3 = +1050.8
+ PhraseModel_4 = -2902.7
+ PhraseModel_5 = -486
+ PhraseModel_6 = -436
+ PassThrough = -2985
+ ---
+ 1best avg score: 0.30742 (+0.13343)
+ 1best avg model score: -1.5393e+05 (-2.0329e+05)
+ avg # pairs: 623.8 (meaningless)
+ avg # rank err: 623.8
+ avg # margin viol: 0
+ non0 feature count: 1770
+ avg list sz: 91.3
+ avg f count: 118.58
+(time 0.25 min, 1.5 s/S)
Writing weights file to '-' ...
done
---
-Best iteration: 2 [SCORE 'fixed_stupid_bleu'=0.30764].
-This took 0.65 min.
+Best iteration: 3 [SCORE 'fixed_stupid_bleu'=0.30742].
+This took 0.86667 min.
diff --git a/training/dtrain/parallelize.rb b/training/dtrain/parallelize.rb
index 2fc66cab..60ca9422 100755
--- a/training/dtrain/parallelize.rb
+++ b/training/dtrain/parallelize.rb
@@ -21,7 +21,8 @@ opts = Trollop::options do
opt :qsub, "use qsub", :type => :bool, :default => false
opt :dtrain_binary, "path to dtrain binary", :type => :string
opt :extra_qsub, "extra qsub args", :type => :string, :default => ""
- opt :per_shard_decoder_configs, "give special decoder config per shard", :type => :string, :short => :o
+ opt :per_shard_decoder_configs, "give special decoder config per shard", :type => :string, :short => '-o'
+ opt :first_input_weights, "input weights for first iter", :type => :string, :default => '', :short => '-w'
end
usage if not opts[:config]&&opts[:shards]&&opts[:input]&&opts[:references]
@@ -54,6 +55,7 @@ input = opts[:input]
refs = opts[:references]
use_qsub = opts[:qsub]
shards_at_once = opts[:processes_at_once]
+first_input_weights = opts[:first_input_weights]
`mkdir work`
@@ -137,10 +139,13 @@ end
else
cdec_cfg = ""
end
+ if first_input_weights!='' && epoch == 0
+ input_weights = "--input_weights #{first_input_weights}"
+ end
pids << Kernel.fork {
- `#{qsub_str_start}#{dtrain_bin} -c #{ini} #{cdec_cfg}\
+ `#{qsub_str_start}#{dtrain_bin} -c #{ini} #{cdec_cfg} #{input_weights}\
--input #{input_files[shard]}\
- --refs #{refs_files[shard]} #{input_weights}\
+ --refs #{refs_files[shard]}\
--output work/weights.#{shard}.#{epoch}#{qsub_str_end} #{local_end}`
}
weights_files << "work/weights.#{shard}.#{epoch}"
--
cgit v1.2.3
From 12577135f7504a3909111479c9053410bfed8354 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Sun, 3 Nov 2013 21:24:51 +0100
Subject: bitext input for dtrain
---
training/dtrain/Makefile.am | 2 +-
training/dtrain/dtrain.cc | 45 ++++++++++++++++++++------
training/dtrain/dtrain.h | 2 ++
training/dtrain/examples/standard/dtrain.ini | 5 +--
training/dtrain/examples/standard/nc-wmt11.gz | Bin 0 -> 113504 bytes
5 files changed, 41 insertions(+), 13 deletions(-)
create mode 100644 training/dtrain/examples/standard/nc-wmt11.gz
diff --git a/training/dtrain/Makefile.am b/training/dtrain/Makefile.am
index 844c790d..ecb6c128 100644
--- a/training/dtrain/Makefile.am
+++ b/training/dtrain/Makefile.am
@@ -1,7 +1,7 @@
bin_PROGRAMS = dtrain
dtrain_SOURCES = dtrain.cc score.cc dtrain.h kbestget.h ksampler.h pairsampling.h score.h
-dtrain_LDADD = ../../decoder/libcdec.a ../../klm/search/libksearch.a ../../mteval/libmteval.a ../../utils/libutils.a ../../klm/lm/libklm.a ../../klm/util/libklm_util.a ../../klm/util/double-conversion/libklm_util_double.a
+dtrain_LDADD = ../../decoder/libcdec.a ../../klm/search/libksearch.a ../../mteval/libmteval.a ../../utils/libutils.a ../../klm/lm/libklm.a ../../klm/util/libklm_util.a ../../klm/util/double-conversion/libklm_util_double.a -lboost_regex
AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval
diff --git a/training/dtrain/dtrain.cc b/training/dtrain/dtrain.cc
index 38a9b69a..a496f08a 100644
--- a/training/dtrain/dtrain.cc
+++ b/training/dtrain/dtrain.cc
@@ -12,8 +12,9 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
{
po::options_description ini("Configuration File Options");
ini.add_options()
- ("input", po::value<string>()->default_value("-"), "input file (src)")
+ ("input", po::value<string>(), "input file (src)")
("refs,r", po::value<string>(), "references")
+ ("bitext,b", po::value<string>(), "bitext: 'src ||| tgt'")
("output", po::value<string>()->default_value("-"), "output weights file, '-' for STDOUT")
("input_weights", po::value<string>(), "input weights file (e.g. from previous iteration)")
("decoder_config", po::value<string>(), "configuration file for cdec")
@@ -73,13 +74,17 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
cerr << "Wrong 'pair_sampling' param: '" << (*cfg)["pair_sampling"].as<string>() << "'." << endl;
return false;
}
- if(cfg->count("hi_lo") && (*cfg)["pair_sampling"].as<string>() != "XYX") {
+ if (cfg->count("hi_lo") && (*cfg)["pair_sampling"].as<string>() != "XYX") {
cerr << "Warning: hi_lo only works with pair_sampling XYX." << endl;
}
- if((*cfg)["hi_lo"].as<float>() > 0.5 || (*cfg)["hi_lo"].as<float>() < 0.01) {
+ if ((*cfg)["hi_lo"].as<float>() > 0.5 || (*cfg)["hi_lo"].as<float>() < 0.01) {
cerr << "hi_lo must lie in [0.01, 0.5]" << endl;
return false;
}
+ if ((cfg->count("input")>0 || cfg->count("refs")>0) && cfg->count("bitext")>0) {
+ cerr << "Provide 'input' and 'refs' or 'bitext', not both." << endl;
+ return false;
+ }
if ((*cfg)["pair_threshold"].as<score_t>() < 0) {
cerr << "The threshold must be >= 0!" << endl;
return false;
@@ -208,13 +213,24 @@ main(int argc, char** argv)
// output
string output_fn = cfg["output"].as<string>();
// input
- string input_fn = cfg["input"].as<string>();
+ bool read_bitext = false;
+ string input_fn;
+ if (cfg.count("bitext")) {
+ read_bitext = true;
+ input_fn = cfg["bitext"].as<string>();
+ } else {
+ input_fn = cfg["input"].as<string>();
+ }
ReadFile input(input_fn);
// buffer input for t > 0
vector<string> src_str_buf; // source strings (decoder takes only strings)
vector<vector<WordID> > ref_ids_buf; // references as WordID vecs
- string refs_fn = cfg["refs"].as<string>();
- ReadFile refs(refs_fn);
+ ReadFile refs;
+ string refs_fn;
+ if (!read_bitext) {
+ refs_fn = cfg["refs"].as<string>();
+ refs.Init(refs_fn);
+ }
unsigned in_sz = std::numeric_limits<unsigned>::max(); // input index, input size
vector<vector<score_t> > all_scores;
@@ -253,7 +269,8 @@ main(int argc, char** argv)
cerr << setw(25) << "max pairs " << max_pairs << endl;
cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl;
cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
- cerr << setw(25) << "refs " << "'" << refs_fn << "'" << endl;
+ if (!read_bitext)
+ cerr << setw(25) << "refs " << "'" << refs_fn << "'" << endl;
cerr << setw(25) << "output " << "'" << output_fn << "'" << endl;
if (cfg.count("input_weights"))
cerr << setw(25) << "weights in " << "'" << cfg["input_weights"].as<string>() << "'" << endl;
@@ -279,9 +296,16 @@ main(int argc, char** argv)
{
string in;
+ string ref;
bool next = false, stop = false; // next iteration or premature stop
if (t == 0) {
if(!getline(*input, in)) next = true;
+ if(read_bitext) {
+ vector<string> strs;
+ boost::algorithm::split_regex(strs, in, boost::regex(" \\|\\|\\| "));
+ in = strs[0];
+ ref = strs[1];
+ }
} else {
if (ii == in_sz) next = true; // stop if we reach the end of our input
}
@@ -318,10 +342,11 @@ main(int argc, char** argv)
// getting input
vector<WordID> ref_ids; // reference as vector<WordID>
if (t == 0) {
- string r_;
- getline(*refs, r_);
+ if (!read_bitext) {
+ getline(*refs, ref);
+ }
vector<string> ref_tok;
- boost::split(ref_tok, r_, boost::is_any_of(" "));
+ boost::split(ref_tok, ref, boost::is_any_of(" "));
register_and_convert(ref_tok, ref_ids);
ref_ids_buf.push_back(ref_ids);
src_str_buf.push_back(in);
diff --git a/training/dtrain/dtrain.h b/training/dtrain/dtrain.h
index 3981fb39..ccb5ad4d 100644
--- a/training/dtrain/dtrain.h
+++ b/training/dtrain/dtrain.h
@@ -9,6 +9,8 @@
#include
#include
+#include <boost/algorithm/string/regex.hpp>
+#include <boost/regex.hpp>
#include
#include "decoder.h"
diff --git a/training/dtrain/examples/standard/dtrain.ini b/training/dtrain/examples/standard/dtrain.ini
index e6d6382e..7dbb4ff0 100644
--- a/training/dtrain/examples/standard/dtrain.ini
+++ b/training/dtrain/examples/standard/dtrain.ini
@@ -1,5 +1,6 @@
-input=./nc-wmt11.de.gz
-refs=./nc-wmt11.en.gz
+#input=./nc-wmt11.de.gz
+#refs=./nc-wmt11.en.gz
+bitext=./nc-wmt11.gz
output=- # a weights file (add .gz for gzip compression) or STDOUT '-'
select_weights=VOID # output average (over epochs) weight vector
decoder_config=./cdec.ini # config for cdec
diff --git a/training/dtrain/examples/standard/nc-wmt11.gz b/training/dtrain/examples/standard/nc-wmt11.gz
new file mode 100644
index 00000000..c39c5aef
Binary files /dev/null and b/training/dtrain/examples/standard/nc-wmt11.gz differ
--
cgit v1.2.3
From a9171fa0aa0ad6d7611fe079ecee464bc5f78231 Mon Sep 17 00:00:00 2001
From: Patrick Simianer
Date: Sun, 3 Nov 2013 21:56:06 +0100
Subject: cleaned up parsematch features
---
decoder/ff_parse_match.cc | 17 ++++++++++-------
decoder/ff_parse_match.h | 1 +
2 files changed, 11 insertions(+), 7 deletions(-)
diff --git a/decoder/ff_parse_match.cc b/decoder/ff_parse_match.cc
index ed556b91..94634b27 100644
--- a/decoder/ff_parse_match.cc
+++ b/decoder/ff_parse_match.cc
@@ -13,6 +13,10 @@ using namespace std;
// implements the parse match features as described in Vilar et al. (2008)
// source trees must be represented in Penn Treebank format, e.g.
// (S (NP John) (VP (V left)))
+//
+// Annotate source sentences with "<seg grammar="..." src_tree="..." id="..."> ..."
+// Note: You need to escape quite a lot of stuff in all your models!
+//
struct ParseMatchFeaturesImpl {
ParseMatchFeaturesImpl(const string& param) {
@@ -42,10 +46,8 @@ struct ParseMatchFeaturesImpl {
void InitializeGrids(const string& tree, unsigned src_len) {
assert(tree.size() > 0);
- //fids_cat.clear();
fids_ef.clear();
src_tree.clear();
- //fids_cat.resize(src_len, src_len + 1);
fids_ef.resize(src_len, src_len + 1);
src_tree.resize(src_len, src_len + 1, TD::Convert("XX"));
ParseTreeString(tree, src_len);
@@ -112,7 +114,7 @@ struct ParseMatchFeaturesImpl {
int fid_ef = FD::Convert("PM");
int min_dist; // minimal distance to next syntactic constituent of this rule's LHS
int summed_min_dists; // minimal distances of LHS and NTs summed up
- if (TD::Convert(lhs).compare("XX") != 0)
+ if (TD::Convert(lhs).compare("XX") != 0)
min_dist= 0;
// compute the distance to the next syntactical constituent
else {
@@ -131,7 +133,7 @@ struct ParseMatchFeaturesImpl {
ok = 1;
break;
}
- // check if removing k words from the rule span will
+ // check if removing k words from the rule span will
// lead to a syntactical constituent
else {
//cerr << "Hilfe...!" << endl;
@@ -144,7 +146,7 @@ struct ParseMatchFeaturesImpl {
ok = 1;
break;
}
- }
+ }
}
if (ok) break;
}
@@ -183,9 +185,9 @@ struct ParseMatchFeaturesImpl {
return min_dist;
}
- Array2D<WordID> src_tree; // src_tree(i,j) NT = type 
+ Array2D<WordID> src_tree; // src_tree(i,j) NT = type
unsigned int src_sent_len;
- mutable Array2D