author    Patrick Simianer <simianer@cl.uni-heidelberg.de>  2012-04-26 11:36:49 +0200
committer Patrick Simianer <simianer@cl.uni-heidelberg.de>  2012-04-26 11:36:49 +0200
commit    fde1df3ee578564f78d7d0eac453dcb3f1740e05 (patch)
tree      3d986ff73becb4b062633bc6d2a7ba946a3bc488 /dtrain
parent    9572d31167162955a02276c1846ae4baefa7ee53 (diff)
remove obsolete stuff
Diffstat (limited to 'dtrain')
-rw-r--r--  dtrain/NEXT                                    7
-rw-r--r--  dtrain/dtrain.cc                              46
-rw-r--r--  dtrain/dtrain.h                                4
-rwxr-xr-x  dtrain/hstreaming/avg.rb                       2
-rw-r--r--  dtrain/hstreaming/cdec.ini                     3
-rw-r--r--  dtrain/hstreaming/dtrain.ini                   4
-rwxr-xr-x  dtrain/hstreaming/dtrain.sh                    3
-rwxr-xr-x  dtrain/hstreaming/hadoop-streaming-job.sh      7
-rwxr-xr-x  dtrain/hstreaming/lplp.rb                      2
-rwxr-xr-x  dtrain/hstreaming/rule_count/map.sh            4
-rw-r--r--  dtrain/hstreaming/rule_count/red.rb           24
-rw-r--r--  dtrain/hstreaming/rule_count/rulecount.rb     13
-rw-r--r--  dtrain/hstreaming/rule_count/test              8
-rw-r--r--  dtrain/kbestget.h                              2
-rw-r--r--  dtrain/ksampler.h                              2
-rw-r--r--  dtrain/pairsampling.h                          3
-rw-r--r--  dtrain/score.h                                 2
-rw-r--r--  dtrain/test/example/cdec.ini                   4
-rw-r--r--  dtrain/test/example/dtrain.ini                12
-rw-r--r--  dtrain/test/mtm11/logreg_cd/bin_class.cc       4
-rw-r--r--  dtrain/test/mtm11/logreg_cd/bin_class.h       22
-rw-r--r--  dtrain/test/mtm11/logreg_cd/log_reg.cc        39
-rw-r--r--  dtrain/test/mtm11/logreg_cd/log_reg.h         14
-rw-r--r--  dtrain/test/mtm11/mira_update/Hildreth.cpp   187
-rw-r--r--  dtrain/test/mtm11/mira_update/Hildreth.h      10
-rw-r--r--  dtrain/test/mtm11/mira_update/dtrain.cc      532
-rw-r--r--  dtrain/test/mtm11/mira_update/sample.h       101
-rw-r--r--  dtrain/test/toy/dtrain.ini                     4
28 files changed, 51 insertions(+), 1014 deletions(-)
diff --git a/dtrain/NEXT b/dtrain/NEXT
deleted file mode 100644
index eccfb313..00000000
--- a/dtrain/NEXT
+++ /dev/null
@@ -1,7 +0,0 @@
-make svm faster (cuda)?
- other learning frameworks
-target side rule ngram feature template
-decoder meta-parameters test
-sa-extract -> leave-one-out?
-rerank while sgd?
-
diff --git a/dtrain/dtrain.cc b/dtrain/dtrain.cc
index e7a1244c..cf913765 100644
--- a/dtrain/dtrain.cc
+++ b/dtrain/dtrain.cc
@@ -15,7 +15,7 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
("tmp", po::value<string>()->default_value("/tmp"), "temp dir to use")
("keep", po::value<bool>()->zero_tokens(), "keep weights files for each iteration")
("hstreaming", po::value<string>(), "run in hadoop streaming mode, arg is a task id")
- ("epochs", po::value<unsigned>()->default_value(10), "# of iterations T (per shard)")
+ ("epochs", po::value<unsigned>()->default_value(10), "# of iterations T (per shard)")
("k", po::value<unsigned>()->default_value(100), "how many translations to sample")
("sample_from", po::value<string>()->default_value("kbest"), "where to sample translations from: 'kbest', 'forest'")
("filter", po::value<string>()->default_value("uniq"), "filter kbest list: 'not', 'uniq'")
@@ -47,7 +47,7 @@ dtrain_init(int argc, char** argv, po::variables_map* cfg)
po::store(po::parse_config_file(ini_f, ini), *cfg);
}
po::notify(*cfg);
- if (!cfg->count("decoder_config")) {
+ if (!cfg->count("decoder_config")) {
cerr << cl << endl;
return false;
}
@@ -93,10 +93,10 @@ main(int argc, char** argv)
{
// handle most parameters
po::variables_map cfg;
- if (!dtrain_init(argc, argv, &cfg)) exit(1); // something is wrong
+ if (!dtrain_init(argc, argv, &cfg)) exit(1); // something is wrong
bool quiet = false;
if (cfg.count("quiet")) quiet = true;
- bool verbose = false;
+ bool verbose = false;
if (cfg.count("verbose")) verbose = true;
bool noup = false;
if (cfg.count("noup")) noup = true;
@@ -118,7 +118,7 @@ main(int argc, char** argv)
inc_correct = true;
const unsigned k = cfg["k"].as<unsigned>();
- const unsigned N = cfg["N"].as<unsigned>();
+ const unsigned N = cfg["N"].as<unsigned>();
const unsigned T = cfg["epochs"].as<unsigned>();
const unsigned stop_after = cfg["stop_after"].as<unsigned>();
const string filter_type = cfg["filter"].as<string>();
@@ -241,7 +241,7 @@ main(int argc, char** argv)
cerr << setw(25) << "rescale " << rescale << endl;
cerr << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl;
cerr << setw(25) << "input " << "'" << input_fn << "'" << endl;
-#ifdef DTRAIN_LOCAL
+#ifdef DTRAIN_LOCAL
cerr << setw(25) << "refs " << "'" << refs_fn << "'" << endl;
#endif
cerr << setw(25) << "output " << "'" << output_fn << "'" << endl;
@@ -258,7 +258,7 @@ main(int argc, char** argv)
if (hstreaming) cerr << "reporter:status:Iteration #" << t+1 << " of " << T << endl;
- time_t start, end;
+ time_t start, end;
time(&start);
#ifndef DTRAIN_LOCAL
igzstream grammar_buf_in;
@@ -281,7 +281,7 @@ main(int argc, char** argv)
}
// stop after X sentences (but still go on for those)
if (stop_after > 0 && stop_after == ii && !next) stop = true;
-
+
// produce some pretty output
if (!quiet && !verbose) {
if (ii == 0) cerr << " ";
@@ -302,7 +302,7 @@ main(int argc, char** argv)
}
}
}
-
+
// next iteration
if (next || stop) break;
@@ -315,7 +315,7 @@ main(int argc, char** argv)
vector<string> in_split; // input: sid\tsrc\tref\tpsg
if (t == 0) {
// handling input
- split_in(in, in_split);
+ split_in(in, in_split);
if (hstreaming && ii == 0) cerr << "reporter:counter:" << task_id << ",First ID," << in_split[0] << endl;
// getting reference
vector<string> ref_tok;
@@ -369,13 +369,13 @@ main(int argc, char** argv)
ref_ids = ref_ids_buf[ii];
}
observer->SetRef(ref_ids);
- if (t == 0)
+ if (t == 0)
decoder.Decode(in, observer);
else
decoder.Decode(src_str_buf[ii], observer);
#endif
- // get (scored) samples
+ // get (scored) samples
vector<ScoredHyp>* samples = observer->GetSamples();
if (verbose) {
@@ -475,7 +475,7 @@ main(int argc, char** argv)
}
if (rescale) lambdas /= lambdas.l2norm();
-
+
++ii;
if (hstreaming) {
@@ -485,7 +485,7 @@ main(int argc, char** argv)
} // input loop
- if (average) w_average += lambdas;
+ if (average) w_average += lambdas;
if (scorer_str == "approx_bleu") scorer->Reset();
@@ -517,7 +517,7 @@ main(int argc, char** argv)
score_diff = score_avg;
model_diff = model_avg;
}
-
+
unsigned nonz = 0;
if (!quiet || hstreaming) nonz = (unsigned)lambdas.size_nonzero();
@@ -543,12 +543,12 @@ main(int argc, char** argv)
}
if (hstreaming) {
- rep.update_counter("Score 1best avg #"+boost::lexical_cast<string>(t+1), (unsigned)(score_avg*DTRAIN_SCALE));
- rep.update_counter("Model 1best avg #"+boost::lexical_cast<string>(t+1), (unsigned)(model_avg*DTRAIN_SCALE));
- rep.update_counter("Pairs avg #"+boost::lexical_cast<string>(t+1), (unsigned)((npairs/(weight_t)in_sz)*DTRAIN_SCALE));
- rep.update_counter("Rank errors avg #"+boost::lexical_cast<string>(t+1), (unsigned)((rank_errors/(weight_t)in_sz)*DTRAIN_SCALE));
- rep.update_counter("Margin violations avg #"+boost::lexical_cast<string>(t+1), (unsigned)((margin_violations/(weight_t)in_sz)*DTRAIN_SCALE));
- rep.update_counter("Non zero feature count #"+boost::lexical_cast<string>(t+1), nonz);
+ rep.update_counter("Score 1best avg #"+boost::lexical_cast<string>(t+1), (unsigned)(score_avg*DTRAIN_SCALE));
+ rep.update_counter("Model 1best avg #"+boost::lexical_cast<string>(t+1), (unsigned)(model_avg*DTRAIN_SCALE));
+ rep.update_counter("Pairs avg #"+boost::lexical_cast<string>(t+1), (unsigned)((npairs/(weight_t)in_sz)*DTRAIN_SCALE));
+ rep.update_counter("Rank errors avg #"+boost::lexical_cast<string>(t+1), (unsigned)((rank_errors/(weight_t)in_sz)*DTRAIN_SCALE));
+ rep.update_counter("Margin violations avg #"+boost::lexical_cast<string>(t+1), (unsigned)((margin_violations/(weight_t)in_sz)*DTRAIN_SCALE));
+ rep.update_counter("Non zero feature count #"+boost::lexical_cast<string>(t+1), nonz);
rep.update_gcounter("Non zero feature count #"+boost::lexical_cast<string>(t+1), nonz);
}
@@ -575,7 +575,7 @@ main(int argc, char** argv)
if (select_weights == "best" || keep) {
lambdas.init_vector(&dense_weights);
string w_fn = "weights." + boost::lexical_cast<string>(t) + ".gz";
- Weights::WriteToFile(w_fn, dense_weights, true);
+ Weights::WriteToFile(w_fn, dense_weights, true);
}
} // outer loop
@@ -625,7 +625,7 @@ main(int argc, char** argv)
if (output_fn == "-" && hstreaming) cout << "__SHARD_COUNT__\t1" << endl;
if (!quiet) cerr << "done" << endl;
}
-
+
if (!quiet) {
cerr << _p5 << _np << endl << "---" << endl << "Best iteration: ";
cerr << best_it+1 << " [SCORE '" << scorer_str << "'=" << max_score << "]." << endl;
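Note on the counter updates in this file: the float averages are multiplied by DTRAIN_SCALE and truncated before reporting, because Hadoop streaming counters only accept integral amounts via the "reporter:counter:<group>,<name>,<amount>" protocol. A minimal sketch of that scaling; the DTRAIN_SCALE value here is illustrative, not the one defined in dtrain.h:

#include <iostream>

#define DTRAIN_SCALE 100000 // illustrative value

int main()
{
  // floats can't be reported directly, so scale up and truncate
  double score_avg = 0.25;
  unsigned counter = (unsigned)(score_avg * DTRAIN_SCALE);
  std::cout << "reporter:counter:dtrain,Score 1best avg,"
            << counter << std::endl; // 25000
  return 0;
}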
diff --git a/dtrain/dtrain.h b/dtrain/dtrain.h
index 61d60657..ac13995a 100644
--- a/dtrain/dtrain.h
+++ b/dtrain/dtrain.h
@@ -13,7 +13,7 @@
#include "filelib.h"
-//#define DTRAIN_LOCAL
+#define DTRAIN_LOCAL
#define DTRAIN_DOTS 10 // after how many inputs to display a '.'
#define DTRAIN_GRAMMAR_DELIM "########EOS########"
@@ -49,7 +49,7 @@ inline void split_in(string& s, vector<string>& parts)
unsigned e = f;
f = s.find("\t", f+1);
if (e != 0) parts.push_back(s.substr(e+1, f-e-1));
- else parts.push_back(s.substr(0, f));
+ else parts.push_back(s.substr(0, f));
}
s.erase(0, f+1);
}
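Note on the DTRAIN_LOCAL change above: with the flag defined, dtrain is built for local runs and reads references from a separate refs file; with it undefined, it expects hadoop streaming input where each line carries id, source, reference and per-sentence grammar ("sid\tsrc\tref\tpsg", as the comment in dtrain.cc puts it). A minimal sketch of how the flag branches at compile time (names illustrative):

#include <iostream>
#include <string>

#define DTRAIN_LOCAL

// sketch: select the input convention at compile time, as dtrain.cc does
void describe_input(const std::string& line)
{
#ifdef DTRAIN_LOCAL
  std::cout << "local mode, src only: " << line << std::endl;
#else
  std::cout << "streaming mode, sid\\tsrc\\tref\\tpsg: " << line << std::endl;
#endif
}

int main() { describe_input("ein kleines haus"); return 0; }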
diff --git a/dtrain/hstreaming/avg.rb b/dtrain/hstreaming/avg.rb
index 5deb62e4..2599c732 100755
--- a/dtrain/hstreaming/avg.rb
+++ b/dtrain/hstreaming/avg.rb
@@ -1,4 +1,5 @@
#!/usr/bin/env ruby
+# first arg may be an int of custom shard count
shard_count_key = "__SHARD_COUNT__"
@@ -22,7 +23,6 @@ else
end
w.each_key { |k|
if k == shard_count_key
- #puts "# shard count: #{shard_count.to_i}"
next
else
puts "#{k}\t#{w[k]/shard_count}"
diff --git a/dtrain/hstreaming/cdec.ini b/dtrain/hstreaming/cdec.ini
index 61f13e86..d4f5cecd 100644
--- a/dtrain/hstreaming/cdec.ini
+++ b/dtrain/hstreaming/cdec.ini
@@ -2,11 +2,12 @@ formalism=scfg
add_pass_through_rules=true
scfg_max_span_limit=15
intersection_strategy=cube_pruning
-cubepruning_pop_limit=200
+cubepruning_pop_limit=30
feature_function=WordPenalty
feature_function=KLanguageModel nc-wmt11.en.srilm.gz
#feature_function=ArityPenalty
#feature_function=CMR2008ReorderingFeatures
+#feature_function=Dwarf
#feature_function=InputIndicator
#feature_function=LexNullJump
#feature_function=NewJump
diff --git a/dtrain/hstreaming/dtrain.ini b/dtrain/hstreaming/dtrain.ini
index 118a27c5..05535299 100644
--- a/dtrain/hstreaming/dtrain.ini
+++ b/dtrain/hstreaming/dtrain.ini
@@ -2,11 +2,11 @@ input=-
output=-
decoder_config=cdec.ini
tmp=/var/hadoop/mapred/local/
-epochs=10
+epochs=1
k=100
N=4
learning_rate=0.0001
-gamma=0.00001
+gamma=0
scorer=stupid_bleu
sample_from=kbest
filter=uniq
diff --git a/dtrain/hstreaming/dtrain.sh b/dtrain/hstreaming/dtrain.sh
index ea0276dd..877ff94c 100755
--- a/dtrain/hstreaming/dtrain.sh
+++ b/dtrain/hstreaming/dtrain.sh
@@ -1,8 +1,9 @@
#!/bin/bash
+# script to run dtrain with a task id
pushd . &>/dev/null
cd ..
ID=$(basename $(pwd)) # attempt_...
popd &>/dev/null
-./dtrain -c dtrain.ini --hstreaming $ID
+./dtrain -c dtrain.ini --hstreaming $ID
diff --git a/dtrain/hstreaming/hadoop-streaming-job.sh b/dtrain/hstreaming/hadoop-streaming-job.sh
index 90c2b790..92419956 100755
--- a/dtrain/hstreaming/hadoop-streaming-job.sh
+++ b/dtrain/hstreaming/hadoop-streaming-job.sh
@@ -6,17 +6,16 @@ EXP=a_simple_test
HADOOP_HOME=/usr/lib/hadoop-0.20
JAR=contrib/streaming/hadoop-streaming-0.20.2-cdh3u1.jar
HSTREAMING="$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/$JAR"
-# ^^^
IN=input_on_hdfs
OUT=output_weights_on_hdfs
-# you can remove the -reducer line if you want to
+# you can -reducer to NONE if you want to
# do feature selection/averaging locally (e.g. to
-# keep weights of the iterations)
+# keep weights of all epochs)
$HSTREAMING \
-mapper "dtrain.sh" \
- -reducer "lplp.rb l2 select_k 100000" \
+ -reducer "ruby lplp.rb l2 select_k 100000" \
-input $IN \
-output $OUT \
-file dtrain.sh \
diff --git a/dtrain/hstreaming/lplp.rb b/dtrain/hstreaming/lplp.rb
index 57353adb..f0cd58c5 100755
--- a/dtrain/hstreaming/lplp.rb
+++ b/dtrain/hstreaming/lplp.rb
@@ -29,7 +29,7 @@ end
# selection
def select_k(weights, norm_fun, n, k=10000)
weights.sort{|a,b| norm_fun.call(b[1], n) <=> norm_fun.call(a[1], n)}.each { |p|
- puts "#{p[0]}\t#{mean(p[1], n)}"
+ puts "#{p[0]}\t#{mean(p[1], n)}"
k -= 1
if k == 0 then break end
}
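Note: select_k above sorts features by a norm over their per-shard weight values and emits the mean of the k largest; this is the feature selection the -reducer line in hadoop-streaming-job.sh invokes. A minimal C++ sketch of the same idea, assuming an l2 norm and two shards (all names illustrative):

#include <algorithm>
#include <cmath>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

typedef std::pair<std::string, std::vector<double> > Feat; // name -> per-shard values

int main()
{
  std::vector<Feat> w;
  w.push_back(Feat("Glue",        std::vector<double>(2,  0.5)));
  w.push_back(Feat("WordPenalty", std::vector<double>(2, -1.3)));

  const unsigned n = 2; // shard count
  unsigned k = 1;       // keep the k features with the largest l2 norm

  // sort descending by l2 norm of the per-shard values
  std::sort(w.begin(), w.end(), [](const Feat& a, const Feat& b) {
    double na = 0, nb = 0;
    for (double x : a.second) na += x * x;
    for (double x : b.second) nb += x * x;
    return std::sqrt(na) > std::sqrt(nb);
  });

  // emit "name\tmean" for the kept features, as select_k does
  for (unsigned i = 0; i < k && i < w.size(); ++i) {
    double sum = 0;
    for (double x : w[i].second) sum += x;
    std::cout << w[i].first << "\t" << sum / n << std::endl;
  }
  return 0;
}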
diff --git a/dtrain/hstreaming/rule_count/map.sh b/dtrain/hstreaming/rule_count/map.sh
deleted file mode 100755
index ae75fece..00000000
--- a/dtrain/hstreaming/rule_count/map.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-
-ruby rulecount.rb | sort | ruby red.rb
-
diff --git a/dtrain/hstreaming/rule_count/red.rb b/dtrain/hstreaming/rule_count/red.rb
deleted file mode 100644
index 874ae7ac..00000000
--- a/dtrain/hstreaming/rule_count/red.rb
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env ruby
-
-STDIN.set_encoding 'utf-8'
-STDOUT.set_encoding 'utf-8'
-
-def output(key, val)
- puts "#{key}\t#{val}"
-end
-
-prev_key = nil
-sum = 0
-while line = STDIN.gets
- key, val = line.strip.split /\t/
- if key != prev_key && sum > 0
- output prev_key, sum
- prev_key = key
- sum = 0
- elsif !prev_key
- prev_key = key
- end
- sum += val.to_i
-end
-output prev_key, sum
-
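Note: the deleted red.rb is the standard streaming-reducer pattern: the framework delivers lines sorted by key, so one pass with a running sum suffices, flushing whenever the key changes and once more at EOF. The same loop as a self-contained C++ sketch over "key\tcount" lines:

#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>

int main()
{
  std::string line, key, val, prev_key;
  long sum = 0;
  bool have_prev = false;
  while (std::getline(std::cin, line)) {
    std::istringstream ss(line);
    std::getline(ss, key, '\t');   // key
    std::getline(ss, val, '\t');   // count
    if (have_prev && key != prev_key) { // key changed: flush the group
      std::cout << prev_key << "\t" << sum << std::endl;
      sum = 0;
    }
    prev_key = key;
    have_prev = true;
    sum += std::atol(val.c_str());
  }
  if (have_prev) // flush the last group
    std::cout << prev_key << "\t" << sum << std::endl;
  return 0;
}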
diff --git a/dtrain/hstreaming/rule_count/rulecount.rb b/dtrain/hstreaming/rule_count/rulecount.rb
deleted file mode 100644
index 67361fa4..00000000
--- a/dtrain/hstreaming/rule_count/rulecount.rb
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env ruby
-
-STDIN.set_encoding 'utf-8'
-STDOUT.set_encoding 'utf-8'
-
-while line = STDIN.gets
- a = line.strip.chomp.split "\t"
- a[3..a.size].each { |r|
- id = r.split("|||")[0..2].join("|||").to_s.strip.gsub("\s", "_")
- puts "#{id}\t1"
- }
-end
-
diff --git a/dtrain/hstreaming/rule_count/test b/dtrain/hstreaming/rule_count/test
deleted file mode 100644
index acd00a5e..00000000
--- a/dtrain/hstreaming/rule_count/test
+++ /dev/null
@@ -1,8 +0,0 @@
-a 1
-a 1
-a 1
-b 1
-b 1
-c 1
-d 1
-a 1
diff --git a/dtrain/kbestget.h b/dtrain/kbestget.h
index 0c2da994..bcd82610 100644
--- a/dtrain/kbestget.h
+++ b/dtrain/kbestget.h
@@ -59,7 +59,7 @@ struct HypSampler : public DecoderObserver
vector<WordID>* ref_;
virtual vector<ScoredHyp>* GetSamples()=0;
inline void SetScorer(LocalScorer* scorer) { scorer_ = scorer; }
- inline void SetRef(vector<WordID>& ref) { ref_ = &ref; }
+ inline void SetRef(vector<WordID>& ref) { ref_ = &ref; }
};
////////////////////////////////////////////////////////////////////////////////
diff --git a/dtrain/ksampler.h b/dtrain/ksampler.h
index c45c8f64..eb4813ab 100644
--- a/dtrain/ksampler.h
+++ b/dtrain/ksampler.h
@@ -35,7 +35,7 @@ struct KSampler : public HypSampler
ScoredHyp h;
h.w = samples[i].words;
h.f = samples[i].fmap;
- h.model = log(samples[i].model_score);
+ h.model = log(samples[i].model_score);
h.rank = i;
h.score = scorer_->Score(h.w, *ref_, i);
s_.push_back(h);
diff --git a/dtrain/pairsampling.h b/dtrain/pairsampling.h
index 1fc5b8a0..93c0630a 100644
--- a/dtrain/pairsampling.h
+++ b/dtrain/pairsampling.h
@@ -46,6 +46,7 @@ part108010(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training, s
unsigned sz = s->size();
unsigned slice = 10;
unsigned sep = sz%slice;
+ cout << "sep " << sep <<endl;
if (sep == 0) sep = sz/slice;
for (unsigned i = 0; i < sep; i++) {
for (unsigned j = sep; j < sz; j++) {
@@ -107,7 +108,7 @@ PROsampling(vector<ScoredHyp>* s, vector<pair<ScoredHyp,ScoredHyp> >& training,
}
if (training.size() > 50) {
sort(training.begin(), training.end(), _PRO_cmp_pair_by_diff);
- training.erase(training.begin()+50, training.end());
+ training.erase(training.begin()+50, training.end());
}
return;
}
diff --git a/dtrain/score.h b/dtrain/score.h
index 85cd0317..5aceb81f 100644
--- a/dtrain/score.h
+++ b/dtrain/score.h
@@ -15,7 +15,7 @@ struct NgramCounts
map<unsigned, unsigned> clipped;
map<unsigned, unsigned> sum;
- NgramCounts(const unsigned N) : N_(N) { Zero(); }
+ NgramCounts(const unsigned N) : N_(N) { Zero(); }
inline void
operator+=(const NgramCounts& rhs)
diff --git a/dtrain/test/example/cdec.ini b/dtrain/test/example/cdec.ini
index fe5ca759..6642107f 100644
--- a/dtrain/test/example/cdec.ini
+++ b/dtrain/test/example/cdec.ini
@@ -5,7 +5,8 @@ intersection_strategy=cube_pruning
cubepruning_pop_limit=30
feature_function=WordPenalty
feature_function=KLanguageModel test/example/nc-wmt11.en.srilm.gz
-# all currently working feature function for translation:
+# all currently working feature functions for translation:
+# (with those features active that were used in the ACL paper)
#feature_function=ArityPenalty
#feature_function=CMR2008ReorderingFeatures
#feature_function=Dwarf
@@ -21,4 +22,3 @@ feature_function=RuleShape
#feature_function=SourceSpanSizeFeatures
#feature_function=SourceWordPenalty
#feature_function=SpanFeatures
-# ^^^ features active that were used in the ACL paper
diff --git a/dtrain/test/example/dtrain.ini b/dtrain/test/example/dtrain.ini
index 66be6bf2..b59250f3 100644
--- a/dtrain/test/example/dtrain.ini
+++ b/dtrain/test/example/dtrain.ini
@@ -1,18 +1,18 @@
-input=test/example/nc-wmt11.1k.gz # use '-' for stdin
+input=test/example/nc-wmt11.1k.gz # use '-' for STDIN
output=weights.gz # a weights file (add .gz for gzip compression) or STDOUT '-'
decoder_config=test/example/cdec.ini # config for cdec
# weights for these features will be printed on each iteration
print_weights=Glue WordPenalty LanguageModel LanguageModel_OOV PhraseModel_0 PhraseModel_1 PhraseModel_2 PhraseModel_3 PhraseModel_4 PhraseModel_5 PhraseModel_6 PassThrough
tmp=/tmp
-stop_after=100 # stop epoch after 10 inputs
+stop_after=100 # stop epoch after 100 inputs
# interesting stuff
-epochs=100 # run over input 3 times
+epochs=3 # run over input 3 times
k=100 # use 100best lists
-N=4 # optimize (approx) BLEU4
+N=4 # optimize (approx) BLEU4
+scorer=stupid_bleu # use 'stupid' BLEU+1
learning_rate=0.0001 # learning rate
-gamma=0 # use SVM reg
-scorer=smooth_bleu # use smooth BLEU of (Liang et al. '06)
+gamma=0 # use SVM reg
sample_from=kbest # use kbest lists (as opposed to forest)
filter=uniq # only unique entries in kbest (surface form)
pair_sampling=108010 # 10 vs 80 vs 10 and 80 vs 10
diff --git a/dtrain/test/mtm11/logreg_cd/bin_class.cc b/dtrain/test/mtm11/logreg_cd/bin_class.cc
deleted file mode 100644
index 19bcde25..00000000
--- a/dtrain/test/mtm11/logreg_cd/bin_class.cc
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "bin_class.h"
-
-Objective::~Objective() {}
-
diff --git a/dtrain/test/mtm11/logreg_cd/bin_class.h b/dtrain/test/mtm11/logreg_cd/bin_class.h
deleted file mode 100644
index 3466109a..00000000
--- a/dtrain/test/mtm11/logreg_cd/bin_class.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _BIN_CLASS_H_
-#define _BIN_CLASS_H_
-
-#include <vector>
-#include "sparse_vector.h"
-
-struct TrainingInstance {
- // TODO add other info? loss for MIRA-type updates?
- SparseVector<double> x_feature_map;
- bool y;
-};
-
-struct Objective {
- virtual ~Objective();
-
- // returns f(x) and f'(x)
- virtual double ObjectiveAndGradient(const SparseVector<double>& x,
- const std::vector<TrainingInstance>& training_instances,
- SparseVector<double>* g) const = 0;
-};
-
-#endif
diff --git a/dtrain/test/mtm11/logreg_cd/log_reg.cc b/dtrain/test/mtm11/logreg_cd/log_reg.cc
deleted file mode 100644
index ec2331fe..00000000
--- a/dtrain/test/mtm11/logreg_cd/log_reg.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-#include "log_reg.h"
-
-#include <vector>
-#include <cmath>
-
-#include "sparse_vector.h"
-
-using namespace std;
-
-double LogisticRegression::ObjectiveAndGradient(const SparseVector<double>& x,
- const vector<TrainingInstance>& training_instances,
- SparseVector<double>* g) const {
- double cll = 0;
- for (int i = 0; i < training_instances.size(); ++i) {
- const double dotprod = training_instances[i].x_feature_map.dot(x); // TODO no bias, if bias, add x[0]
- double lp_false = dotprod;
- double lp_true = -dotprod;
- if (0 < lp_true) {
- lp_true += log1p(exp(-lp_true));
- lp_false = log1p(exp(lp_false));
- } else {
- lp_true = log1p(exp(lp_true));
- lp_false += log1p(exp(-lp_false));
- }
- lp_true *= -1;
- lp_false *= -1;
- if (training_instances[i].y) { // true label
- cll -= lp_true;
- (*g) -= training_instances[i].x_feature_map * exp(lp_false);
- // (*g)[0] -= exp(lp_false); // bias
- } else { // false label
- cll -= lp_false;
- (*g) += training_instances[i].x_feature_map * exp(lp_true);
- // g += corpus[i].second * exp(lp_true);
- }
- }
- return cll;
-}
-
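Note: the branching on the sign of lp_true in the deleted log_reg.cc is the usual numerically stable evaluation of the log-logistic likelihood: for z = w^T x, exp(z) overflows for large z, so the expression is rewritten before log1p is applied. In LaTeX:

\log\left(1 + e^{z}\right) =
\begin{cases}
  z + \log\left(1 + e^{-z}\right), & z > 0,\\
  \log\left(1 + e^{z}\right),      & z \le 0,
\end{cases}
\qquad
\log \sigma(z) = -\log\left(1 + e^{-z}\right).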
diff --git a/dtrain/test/mtm11/logreg_cd/log_reg.h b/dtrain/test/mtm11/logreg_cd/log_reg.h
deleted file mode 100644
index ecc560b8..00000000
--- a/dtrain/test/mtm11/logreg_cd/log_reg.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _LOG_REG_H_
-#define _LOG_REG_H_
-
-#include <vector>
-#include "sparse_vector.h"
-#include "bin_class.h"
-
-struct LogisticRegression : public Objective {
- double ObjectiveAndGradient(const SparseVector<double>& x,
- const std::vector<TrainingInstance>& training_instances,
- SparseVector<double>* g) const;
-};
-
-#endif
diff --git a/dtrain/test/mtm11/mira_update/Hildreth.cpp b/dtrain/test/mtm11/mira_update/Hildreth.cpp
deleted file mode 100644
index 0e67eb15..00000000
--- a/dtrain/test/mtm11/mira_update/Hildreth.cpp
+++ /dev/null
@@ -1,187 +0,0 @@
-#include "Hildreth.h"
-#include "sparse_vector.h"
-
-using namespace std;
-
-namespace Mira {
- vector<double> Hildreth::optimise (vector< SparseVector<double> >& a, vector<double>& b) {
-
- size_t i;
- int max_iter = 10000;
- double eps = 0.00000001;
- double zero = 0.000000000001;
-
- vector<double> alpha ( b.size() );
- vector<double> F ( b.size() );
- vector<double> kkt ( b.size() );
-
- double max_kkt = -1e100;
-
- size_t K = b.size();
-
- double A[K][K];
- bool is_computed[K];
- for ( i = 0; i < K; i++ )
- {
- A[i][i] = a[i].dot(a[i]);
- is_computed[i] = false;
- }
-
- int max_kkt_i = -1;
-
-
- for ( i = 0; i < b.size(); i++ )
- {
- F[i] = b[i];
- kkt[i] = F[i];
- if ( kkt[i] > max_kkt )
- {
- max_kkt = kkt[i];
- max_kkt_i = i;
- }
- }
-
- int iter = 0;
- double diff_alpha;
- double try_alpha;
- double add_alpha;
-
- while ( max_kkt >= eps && iter < max_iter )
- {
-
- diff_alpha = A[max_kkt_i][max_kkt_i] <= zero ? 0.0 : F[max_kkt_i]/A[max_kkt_i][max_kkt_i];
- try_alpha = alpha[max_kkt_i] + diff_alpha;
- add_alpha = 0.0;
-
- if ( try_alpha < 0.0 )
- add_alpha = -1.0 * alpha[max_kkt_i];
- else
- add_alpha = diff_alpha;
-
- alpha[max_kkt_i] = alpha[max_kkt_i] + add_alpha;
-
- if ( !is_computed[max_kkt_i] )
- {
- for ( i = 0; i < K; i++ )
- {
- A[i][max_kkt_i] = a[i].dot(a[max_kkt_i] ); // for version 1
- //A[i][max_kkt_i] = 0; // for version 1
- is_computed[max_kkt_i] = true;
- }
- }
-
- for ( i = 0; i < F.size(); i++ )
- {
- F[i] -= add_alpha * A[i][max_kkt_i];
- kkt[i] = F[i];
- if ( alpha[i] > zero )
- kkt[i] = abs ( F[i] );
- }
- max_kkt = -1e100;
- max_kkt_i = -1;
- for ( i = 0; i < F.size(); i++ )
- if ( kkt[i] > max_kkt )
- {
- max_kkt = kkt[i];
- max_kkt_i = i;
- }
-
- iter++;
- }
-
- return alpha;
- }
-
- vector<double> Hildreth::optimise (vector< SparseVector<double> >& a, vector<double>& b, double C) {
-
- size_t i;
- int max_iter = 10000;
- double eps = 0.00000001;
- double zero = 0.000000000001;
-
- vector<double> alpha ( b.size() );
- vector<double> F ( b.size() );
- vector<double> kkt ( b.size() );
-
- double max_kkt = -1e100;
-
- size_t K = b.size();
-
- double A[K][K];
- bool is_computed[K];
- for ( i = 0; i < K; i++ )
- {
- A[i][i] = a[i].dot(a[i]);
- is_computed[i] = false;
- }
-
- int max_kkt_i = -1;
-
-
- for ( i = 0; i < b.size(); i++ )
- {
- F[i] = b[i];
- kkt[i] = F[i];
- if ( kkt[i] > max_kkt )
- {
- max_kkt = kkt[i];
- max_kkt_i = i;
- }
- }
-
- int iter = 0;
- double diff_alpha;
- double try_alpha;
- double add_alpha;
-
- while ( max_kkt >= eps && iter < max_iter )
- {
-
- diff_alpha = A[max_kkt_i][max_kkt_i] <= zero ? 0.0 : F[max_kkt_i]/A[max_kkt_i][max_kkt_i];
- try_alpha = alpha[max_kkt_i] + diff_alpha;
- add_alpha = 0.0;
-
- if ( try_alpha < 0.0 )
- add_alpha = -1.0 * alpha[max_kkt_i];
- else if (try_alpha > C)
- add_alpha = C - alpha[max_kkt_i];
- else
- add_alpha = diff_alpha;
-
- alpha[max_kkt_i] = alpha[max_kkt_i] + add_alpha;
-
- if ( !is_computed[max_kkt_i] )
- {
- for ( i = 0; i < K; i++ )
- {
- A[i][max_kkt_i] = a[i].dot(a[max_kkt_i] ); // for version 1
- //A[i][max_kkt_i] = 0; // for version 1
- is_computed[max_kkt_i] = true;
- }
- }
-
- for ( i = 0; i < F.size(); i++ )
- {
- F[i] -= add_alpha * A[i][max_kkt_i];
- kkt[i] = F[i];
- if (alpha[i] > C - zero)
- kkt[i]=-kkt[i];
- else if (alpha[i] > zero)
- kkt[i] = abs(F[i]);
-
- }
- max_kkt = -1e100;
- max_kkt_i = -1;
- for ( i = 0; i < F.size(); i++ )
- if ( kkt[i] > max_kkt )
- {
- max_kkt = kkt[i];
- max_kkt_i = i;
- }
-
- iter++;
- }
-
- return alpha;
- }
-}
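Note: Hildreth's procedure above is a coordinate-ascent solver for the dual of the margin-constrained QP behind the MIRA update; roughly, with a_i the feature-difference vectors and b_i the loss-minus-model-score margins passed in by the caller:

\min_{\mathbf{w}'}\; \tfrac{1}{2}\,\lVert \mathbf{w}' - \mathbf{w} \rVert^2
\quad \text{s.t.} \quad \mathbf{a}_i^{\top} \mathbf{w}' \ge b_i,
\qquad i = 1, \dots, K.

Each iteration greedily picks the constraint with the largest KKT violation, updates its dual variable in closed form (alpha_i <- max(0, alpha_i + F_i / A_ii), with A_ij = a_i . a_j), and propagates the change into the residuals F. The second overload additionally caps each alpha_i at C, the `slack` of the calling code.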
diff --git a/dtrain/test/mtm11/mira_update/Hildreth.h b/dtrain/test/mtm11/mira_update/Hildreth.h
deleted file mode 100644
index 8d791085..00000000
--- a/dtrain/test/mtm11/mira_update/Hildreth.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#include "sparse_vector.h"
-
-namespace Mira {
- class Hildreth {
- public :
- static std::vector<double> optimise(std::vector< SparseVector<double> >& a, std::vector<double>& b);
- static std::vector<double> optimise(std::vector< SparseVector<double> >& a, std::vector<double>& b, double C);
- };
-}
-
diff --git a/dtrain/test/mtm11/mira_update/dtrain.cc b/dtrain/test/mtm11/mira_update/dtrain.cc
deleted file mode 100644
index 933417a4..00000000
--- a/dtrain/test/mtm11/mira_update/dtrain.cc
+++ /dev/null
@@ -1,532 +0,0 @@
-#include "common.h"
-#include "kbestget.h"
-#include "util.h"
-#include "sample.h"
-#include "Hildreth.h"
-
-#include "ksampler.h"
-
-// boost compression
-#include <boost/iostreams/device/file.hpp>
-#include <boost/iostreams/filtering_stream.hpp>
-#include <boost/iostreams/filter/gzip.hpp>
-//#include <boost/iostreams/filter/zlib.hpp>
-//#include <boost/iostreams/filter/bzip2.hpp>
-using namespace boost::iostreams;
-
-
-#ifdef DTRAIN_DEBUG
-#include "tests.h"
-#endif
-
-
-/*
- * init
- *
- */
-bool
-init(int argc, char** argv, po::variables_map* cfg)
-{
- po::options_description conff( "Configuration File Options" );
- size_t k, N, T, stop, n_pairs;
- string s, f, update_type;
- conff.add_options()
- ( "decoder_config", po::value<string>(), "configuration file for cdec" )
- ( "kbest", po::value<size_t>(&k)->default_value(DTRAIN_DEFAULT_K), "k for kbest" )
- ( "ngrams", po::value<size_t>(&N)->default_value(DTRAIN_DEFAULT_N), "N for Ngrams" )
- ( "filter", po::value<string>(&f)->default_value("unique"), "filter kbest list" )
- ( "epochs", po::value<size_t>(&T)->default_value(DTRAIN_DEFAULT_T), "# of iterations T" )
- ( "input", po::value<string>(), "input file" )
- ( "scorer", po::value<string>(&s)->default_value(DTRAIN_DEFAULT_SCORER), "scoring metric" )
- ( "output", po::value<string>(), "output weights file" )
- ( "stop_after", po::value<size_t>(&stop)->default_value(0), "stop after X input sentences" )
- ( "weights_file", po::value<string>(), "input weights file (e.g. from previous iteration)" )
- ( "wprint", po::value<string>(), "weights to print on each iteration" )
- ( "noup", po::value<bool>()->zero_tokens(), "do not update weights" );
-
- po::options_description clo("Command Line Options");
- clo.add_options()
- ( "config,c", po::value<string>(), "dtrain config file" )
- ( "quiet,q", po::value<bool>()->zero_tokens(), "be quiet" )
- ( "update-type", po::value<string>(&update_type)->default_value("mira"), "perceptron or mira" )
- ( "n-pairs", po::value<size_t>(&n_pairs)->default_value(10), "number of pairs used to compute update" )
- ( "verbose,v", po::value<bool>()->zero_tokens(), "be verbose" )
-#ifndef DTRAIN_DEBUG
- ;
-#else
- ( "test", "run tests and exit");
-#endif
- po::options_description config_options, cmdline_options;
-
- config_options.add(conff);
- cmdline_options.add(clo);
- cmdline_options.add(conff);
-
- po::store( parse_command_line(argc, argv, cmdline_options), *cfg );
- if ( cfg->count("config") ) {
- ifstream config( (*cfg)["config"].as<string>().c_str() );
- po::store( po::parse_config_file(config, config_options), *cfg );
- }
- po::notify(*cfg);
-
- if ( !cfg->count("decoder_config") || !cfg->count("input") ) {
- cerr << cmdline_options << endl;
- return false;
- }
- if ( cfg->count("noup") && cfg->count("decode") ) {
- cerr << "You can't use 'noup' and 'decode' at once." << endl;
- return false;
- }
- if ( cfg->count("filter") && (*cfg)["filter"].as<string>() != "unique"
- && (*cfg)["filter"].as<string>() != "no" ) {
- cerr << "Wrong 'filter' type: '" << (*cfg)["filter"].as<string>() << "'." << endl;
- }
- #ifdef DTRAIN_DEBUG
- if ( !cfg->count("test") ) {
- cerr << cmdline_options << endl;
- return false;
- }
- #endif
- return true;
-}
-
-
-// output formatting
-ostream& _nopos( ostream& out ) { return out << resetiosflags( ios::showpos ); }
-ostream& _pos( ostream& out ) { return out << setiosflags( ios::showpos ); }
-ostream& _prec2( ostream& out ) { return out << setprecision(2); }
-ostream& _prec5( ostream& out ) { return out << setprecision(5); }
-
-
-
-
-/*
- * dtrain
- *
- */
-int
-main( int argc, char** argv )
-{
- cout << setprecision( 5 );
- // handle most parameters
- po::variables_map cfg;
- if ( ! init(argc, argv, &cfg) ) exit(1); // something is wrong
-#ifdef DTRAIN_DEBUG
- if ( cfg.count("test") ) run_tests(); // run tests and exit
-#endif
- bool quiet = false;
- if ( cfg.count("quiet") ) quiet = true;
- bool verbose = false;
- if ( cfg.count("verbose") ) verbose = true;
- bool noup = false;
- if ( cfg.count("noup") ) noup = true;
- const size_t k = cfg["kbest"].as<size_t>();
- const size_t N = cfg["ngrams"].as<size_t>();
- const size_t T = cfg["epochs"].as<size_t>();
- const size_t stop_after = cfg["stop_after"].as<size_t>();
- const string filter_type = cfg["filter"].as<string>();
- const string update_type = cfg["update-type"].as<string>();
- const size_t n_pairs = cfg["n-pairs"].as<size_t>();
- const string output_file = cfg["output"].as<string>();
- if ( !quiet ) {
- cout << endl << "dtrain" << endl << "Parameters:" << endl;
- cout << setw(25) << "k " << k << endl;
- cout << setw(25) << "N " << N << endl;
- cout << setw(25) << "T " << T << endl;
- if ( cfg.count("stop-after") )
- cout << setw(25) << "stop_after " << stop_after << endl;
- if ( cfg.count("weights") )
- cout << setw(25) << "weights " << cfg["weights"].as<string>() << endl;
- cout << setw(25) << "input " << "'" << cfg["input"].as<string>() << "'" << endl;
- cout << setw(25) << "filter " << "'" << filter_type << "'" << endl;
- }
-
- vector<string> wprint;
- if ( cfg.count("wprint") ) {
- boost::split( wprint, cfg["wprint"].as<string>(), boost::is_any_of(" ") );
- }
-
- // setup decoder, observer
- register_feature_functions();
- SetSilent(true);
- ReadFile ini_rf( cfg["decoder_config"].as<string>() );
- if ( !quiet )
- cout << setw(25) << "cdec cfg " << "'" << cfg["decoder_config"].as<string>() << "'" << endl;
- Decoder decoder( ini_rf.stream() );
- //KBestGetter observer( k, filter_type );
- MT19937 rng;
- KSampler observer( k, &rng );
-
- // scoring metric/scorer
- string scorer_str = cfg["scorer"].as<string>();
- double (*scorer)( NgramCounts&, const size_t, const size_t, size_t, vector<float> );
- if ( scorer_str == "bleu" ) {
- scorer = &bleu;
- } else if ( scorer_str == "stupid_bleu" ) {
- scorer = &stupid_bleu;
- } else if ( scorer_str == "smooth_bleu" ) {
- scorer = &smooth_bleu;
- } else if ( scorer_str == "approx_bleu" ) {
- scorer = &approx_bleu;
- } else {
- cerr << "Don't know scoring metric: '" << scorer_str << "', exiting." << endl;
- exit(1);
- }
- // for approx_bleu
- NgramCounts global_counts( N ); // counts for 1 best translations
- size_t global_hyp_len = 0; // sum hypothesis lengths
- size_t global_ref_len = 0; // sum reference lengths
- // this is all BLEU implmentations
- vector<float> bleu_weights; // we leave this empty -> 1/N; TODO?
- if ( !quiet ) cout << setw(26) << "scorer '" << scorer_str << "'" << endl << endl;
-
- // init weights
- Weights weights;
- if ( cfg.count("weights") ) weights.InitFromFile( cfg["weights"].as<string>() );
- SparseVector<double> lambdas;
- weights.InitSparseVector( &lambdas );
- vector<double> dense_weights;
-
- // input
- if ( !quiet && !verbose )
- cout << "(a dot represents " << DTRAIN_DOTS << " lines of input)" << endl;
- string input_fn = cfg["input"].as<string>();
- ifstream input;
- if ( input_fn != "-" ) input.open( input_fn.c_str() );
- string in;
- vector<string> in_split; // input: src\tref\tpsg
- vector<string> ref_tok; // tokenized reference
- vector<WordID> ref_ids; // reference as vector of WordID
- string grammar_str;
-
- // buffer input for t > 0
- vector<string> src_str_buf; // source strings, TODO? memory
- vector<vector<WordID> > ref_ids_buf; // references as WordID vecs
- filtering_ostream grammar_buf; // written to compressed file in /tmp
- // this is for writing the grammar buffer file
- grammar_buf.push( gzip_compressor() );
- char grammar_buf_tmp_fn[] = DTRAIN_TMP_DIR"/dtrain-grammars-XXXXXX";
- mkstemp( grammar_buf_tmp_fn );
- grammar_buf.push( file_sink(grammar_buf_tmp_fn, ios::binary | ios::trunc) );
-
- size_t sid = 0, in_sz = 99999999; // sentence id, input size
- double acc_1best_score = 0., acc_1best_model = 0.;
- vector<vector<double> > scores_per_iter;
- double max_score = 0.;
- size_t best_t = 0;
- bool next = false, stop = false;
- double score = 0.;
- size_t cand_len = 0;
- double overall_time = 0.;
-
- // for the perceptron/SVM; TODO as params
- double eta = 0.0005;
- double gamma = 0.;//01; // -> SVM
- lambdas.add_value( FD::Convert("__bias"), 0 );
-
- // for random sampling
- srand ( time(NULL) );
-
-
- for ( size_t t = 0; t < T; t++ ) // T epochs
- {
-
- time_t start, end;
- time( &start );
-
- // actually, we need only need this if t > 0 FIXME
- ifstream grammar_file( grammar_buf_tmp_fn, ios_base::in | ios_base::binary );
- filtering_istream grammar_buf_in;
- grammar_buf_in.push( gzip_decompressor() );
- grammar_buf_in.push( grammar_file );
-
- // reset average scores
- acc_1best_score = acc_1best_model = 0.;
-
- // reset sentence counter
- sid = 0;
-
- if ( !quiet ) cout << "Iteration #" << t+1 << " of " << T << "." << endl;
-
- while( true )
- {
-
- // get input from stdin or file
- in.clear();
- next = stop = false; // next iteration, premature stop
- if ( t == 0 ) {
- if ( input_fn == "-" ) {
- if ( !getline(cin, in) ) next = true;
- } else {
- if ( !getline(input, in) ) next = true;
- }
- } else {
- if ( sid == in_sz ) next = true; // stop if we reach the end of our input
- }
- // stop after X sentences (but still iterate for those)
- if ( stop_after > 0 && stop_after == sid && !next ) stop = true;
-
- // produce some pretty output
- if ( !quiet && !verbose ) {
- if ( sid == 0 ) cout << " ";
- if ( (sid+1) % (DTRAIN_DOTS) == 0 ) {
- cout << ".";
- cout.flush();
- }
- if ( (sid+1) % (20*DTRAIN_DOTS) == 0) {
- cout << " " << sid+1 << endl;
- if ( !next && !stop ) cout << " ";
- }
- if ( stop ) {
- if ( sid % (20*DTRAIN_DOTS) != 0 ) cout << " " << sid << endl;
- cout << "Stopping after " << stop_after << " input sentences." << endl;
- } else {
- if ( next ) {
- if ( sid % (20*DTRAIN_DOTS) != 0 ) {
- cout << " " << sid << endl;
- }
- }
- }
- }
-
- // next iteration
- if ( next || stop ) break;
-
- // weights
- dense_weights.clear();
- weights.InitFromVector( lambdas );
- weights.InitVector( &dense_weights );
- decoder.SetWeights( dense_weights );
-
- if ( t == 0 ) {
- // handling input
- in_split.clear();
- boost::split( in_split, in, boost::is_any_of("\t") ); // in_split[0] is id
- // getting reference
- ref_tok.clear(); ref_ids.clear();
- boost::split( ref_tok, in_split[2], boost::is_any_of(" ") );
- register_and_convert( ref_tok, ref_ids );
- ref_ids_buf.push_back( ref_ids );
- // process and set grammar
- bool broken_grammar = true;
- for ( string::iterator ti = in_split[3].begin(); ti != in_split[3].end(); ti++ ) {
- if ( !isspace(*ti) ) {
- broken_grammar = false;
- break;
- }
- }
- if ( broken_grammar ) continue;
- grammar_str = boost::replace_all_copy( in_split[3], " __NEXT__RULE__ ", "\n" ) + "\n"; // FIXME copy, __
- grammar_buf << grammar_str << DTRAIN_GRAMMAR_DELIM << endl;
- decoder.SetSentenceGrammarFromString( grammar_str );
- // decode, kbest
- src_str_buf.push_back( in_split[1] );
- decoder.Decode( in_split[1], &observer );
- } else {
- // get buffered grammar
- grammar_str.clear();
- int i = 1;
- while ( true ) {
- string g;
- getline( grammar_buf_in, g );
- if ( g == DTRAIN_GRAMMAR_DELIM ) break;
- grammar_str += g+"\n";
- i += 1;
- }
- decoder.SetSentenceGrammarFromString( grammar_str );
- // decode, kbest
- decoder.Decode( src_str_buf[sid], &observer );
- }
-
- // get kbest list
- KBestList* kb;
- //if ( ) { // TODO get from forest
- kb = observer.GetKBest();
- //}
-
- // scoring kbest
- if ( t > 0 ) ref_ids = ref_ids_buf[sid];
- for ( size_t i = 0; i < kb->GetSize(); i++ ) {
- NgramCounts counts = make_ngram_counts( ref_ids, kb->sents[i], N );
- // this is for approx bleu
- if ( scorer_str == "approx_bleu" ) {
- if ( i == 0 ) { // 'context of 1best translations'
- global_counts += counts;
- global_hyp_len += kb->sents[i].size();
- global_ref_len += ref_ids.size();
- counts.reset();
- cand_len = 0;
- } else {
- cand_len = kb->sents[i].size();
- }
- NgramCounts counts_tmp = global_counts + counts;
- // TODO as param
- score = 0.9 * scorer( counts_tmp,
- global_ref_len,
- global_hyp_len + cand_len, N, bleu_weights );
- } else {
- // other scorers
- cand_len = kb->sents[i].size();
- score = scorer( counts,
- ref_ids.size(),
- kb->sents[i].size(), N, bleu_weights );
- }
-
- kb->scores.push_back( score );
-
- if ( i == 0 ) {
- acc_1best_score += score;
- acc_1best_model += kb->model_scores[i];
- }
-
- if ( verbose ) {
- if ( i == 0 ) cout << "'" << TD::GetString( ref_ids ) << "' [ref]" << endl;
- cout << _prec5 << _nopos << "[hyp " << i << "] " << "'" << TD::GetString( kb->sents[i] ) << "'";
- cout << " [SCORE=" << score << ",model="<< kb->model_scores[i] << "]" << endl;
- cout << kb->feats[i] << endl; // this is maybe too verbose
- }
- } // Nbest loop
-
- if ( verbose ) cout << endl;
-
-
- // UPDATE WEIGHTS
- if ( !noup ) {
-
- TrainingInstances pairs;
- sample_all( kb, pairs, n_pairs );
-
- vector< SparseVector<double> > featureValueDiffs;
- vector<double> lossMinusModelScoreDiffs;
- for ( TrainingInstances::iterator ti = pairs.begin();
- ti != pairs.end(); ti++ ) {
-
- SparseVector<double> dv;
- if ( ti->first_score - ti->second_score < 0 ) {
- dv = ti->second - ti->first;
- dv.add_value( FD::Convert("__bias"), -1 );
-
- featureValueDiffs.push_back(dv);
- double lossMinusModelScoreDiff = ti->loss_diff - ti->model_score_diff;
- lossMinusModelScoreDiffs.push_back(lossMinusModelScoreDiff);
-
- if (update_type == "perceptron") {
- lambdas += dv * eta;
- cerr << "after perceptron update: " << lambdas << endl << endl;
- }
-
- if ( verbose ) {
- cout << "{{ f("<< ti->first_rank <<") > f(" << ti->second_rank << ") but g(i)="<< ti->first_score <<" < g(j)="<< ti->second_score << " so update" << endl;
- cout << " i " << TD::GetString(kb->sents[ti->first_rank]) << endl;
- cout << " " << kb->feats[ti->first_rank] << endl;
- cout << " j " << TD::GetString(kb->sents[ti->second_rank]) << endl;
- cout << " " << kb->feats[ti->second_rank] << endl;
- cout << " diff vec: " << dv << endl;
- cout << " lambdas after update: " << lambdas << endl;
- cout << "}}" << endl;
- }
- } else {
- //SparseVector<double> reg;
- //reg = lambdas * ( 2 * gamma );
- //lambdas += reg * ( -eta );
- }
- }
- cerr << "Collected " << featureValueDiffs.size() << " constraints." << endl;
-
- double slack = 0.01;
- if (update_type == "mira") {
- if (featureValueDiffs.size() > 0) {
- vector<double> alphas;
- if (slack != 0) {
- alphas = Mira::Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiffs, slack);
- } else {
- alphas = Mira::Hildreth::optimise(featureValueDiffs, lossMinusModelScoreDiffs);
- }
-
- for (size_t k = 0; k < featureValueDiffs.size(); ++k) {
- lambdas += featureValueDiffs[k] * alphas[k];
- }
- // cerr << "after mira update: " << lambdas << endl << endl;
- }
- }
- }
-
- ++sid;
-
- } // input loop
-
- if ( t == 0 ) in_sz = sid; // remember size (lines) of input
-
- // print some stats
- double avg_1best_score = acc_1best_score/(double)in_sz;
- double avg_1best_model = acc_1best_model/(double)in_sz;
- double avg_1best_score_diff, avg_1best_model_diff;
- if ( t > 0 ) {
- avg_1best_score_diff = avg_1best_score - scores_per_iter[t-1][0];
- avg_1best_model_diff = avg_1best_model - scores_per_iter[t-1][1];
- } else {
- avg_1best_score_diff = avg_1best_score;
- avg_1best_model_diff = avg_1best_model;
- }
- cout << _prec5 << _pos << "WEIGHTS" << endl;
- for (vector<string>::iterator it = wprint.begin(); it != wprint.end(); it++) {
- cout << setw(16) << *it << " = " << dense_weights[FD::Convert( *it )] << endl;
- }
-
- cout << " ---" << endl;
- cout << _nopos << " avg score: " << avg_1best_score;
- cout << _pos << " (" << avg_1best_score_diff << ")" << endl;
- cout << _nopos << "avg model score: " << avg_1best_model;
- cout << _pos << " (" << avg_1best_model_diff << ")" << endl;
- vector<double> remember_scores;
- remember_scores.push_back( avg_1best_score );
- remember_scores.push_back( avg_1best_model );
- scores_per_iter.push_back( remember_scores );
- if ( avg_1best_score > max_score ) {
- max_score = avg_1best_score;
- best_t = t;
- }
-
- // close open files
- if ( input_fn != "-" ) input.close();
- close( grammar_buf );
- grammar_file.close();
-
- time ( &end );
- double time_dif = difftime( end, start );
- overall_time += time_dif;
- if ( !quiet ) {
- cout << _prec2 << _nopos << "(time " << time_dif/60. << " min, ";
- cout << time_dif/(double)in_sz<< " s/S)" << endl;
- }
-
- if ( t+1 != T ) cout << endl;
-
- if ( noup ) break;
-
- // write weights after every epoch
- std::string s;
- std::stringstream out;
- out << t;
- s = out.str();
- string weights_file = output_file + "." + s;
- weights.WriteToFile(weights_file, true );
-
- } // outer loop
-
- unlink( grammar_buf_tmp_fn );
- if ( !noup ) {
- if ( !quiet ) cout << endl << "writing weights file '" << cfg["output"].as<string>() << "' ...";
- weights.WriteToFile( cfg["output"].as<string>(), true );
- if ( !quiet ) cout << "done" << endl;
- }
-
- if ( !quiet ) {
- cout << _prec5 << _nopos << endl << "---" << endl << "Best iteration: ";
- cout << best_t+1 << " [SCORE '" << scorer_str << "'=" << max_score << "]." << endl;
- cout << _prec2 << "This took " << overall_time/60. << " min." << endl;
- }
-
- return 0;
-}
-
diff --git a/dtrain/test/mtm11/mira_update/sample.h b/dtrain/test/mtm11/mira_update/sample.h
deleted file mode 100644
index 5c331bba..00000000
--- a/dtrain/test/mtm11/mira_update/sample.h
+++ /dev/null
@@ -1,101 +0,0 @@
-#ifndef _DTRAIN_SAMPLE_H_
-#define _DTRAIN_SAMPLE_H_
-
-
-#include "kbestget.h"
-
-
-namespace dtrain
-{
-
-
-struct TPair
-{
- SparseVector<double> first, second;
- size_t first_rank, second_rank;
- double first_score, second_score;
- double model_score_diff;
- double loss_diff;
-};
-
-typedef vector<TPair> TrainingInstances;
-
-
-void
- sample_all( KBestList* kb, TrainingInstances &training, size_t n_pairs )
-{
- std::vector<double> loss_diffs;
- TrainingInstances training_tmp;
- for ( size_t i = 0; i < kb->GetSize()-1; i++ ) {
- for ( size_t j = i+1; j < kb->GetSize(); j++ ) {
- TPair p;
- p.first = kb->feats[i];
- p.second = kb->feats[j];
- p.first_rank = i;
- p.second_rank = j;
- p.first_score = kb->scores[i];
- p.second_score = kb->scores[j];
-
- bool conservative = 1;
- if ( kb->scores[i] - kb->scores[j] < 0 ) {
- // j=hope, i=fear
- p.model_score_diff = kb->model_scores[j] - kb->model_scores[i];
- p.loss_diff = kb->scores[j] - kb->scores[i];
- training_tmp.push_back(p);
- loss_diffs.push_back(p.loss_diff);
- }
- else if (!conservative) {
- // i=hope, j=fear
- p.model_score_diff = kb->model_scores[i] - kb->model_scores[j];
- p.loss_diff = kb->scores[i] - kb->scores[j];
- training_tmp.push_back(p);
- loss_diffs.push_back(p.loss_diff);
- }
- }
- }
-
- if (training_tmp.size() > 0) {
- double threshold;
- std::sort(loss_diffs.begin(), loss_diffs.end());
- std::reverse(loss_diffs.begin(), loss_diffs.end());
- threshold = loss_diffs.size() >= n_pairs ? loss_diffs[n_pairs-1] : loss_diffs[loss_diffs.size()-1];
- cerr << "threshold: " << threshold << endl;
- size_t constraints = 0;
- for (size_t i = 0; (i < training_tmp.size() && constraints < n_pairs); ++i) {
- if (training_tmp[i].loss_diff >= threshold) {
- training.push_back(training_tmp[i]);
- constraints++;
- }
- }
- }
- else {
- cerr << "No pairs selected." << endl;
- }
-}
-
-void
-sample_rand( KBestList* kb, TrainingInstances &training )
-{
- srand( time(NULL) );
- for ( size_t i = 0; i < kb->GetSize()-1; i++ ) {
- for ( size_t j = i+1; j < kb->GetSize(); j++ ) {
- if ( rand() % 2 ) {
- TPair p;
- p.first = kb->feats[i];
- p.second = kb->feats[j];
- p.first_rank = i;
- p.second_rank = j;
- p.first_score = kb->scores[i];
- p.second_score = kb->scores[j];
- training.push_back( p );
- }
- }
- }
-}
-
-
-} // namespace
-
-
-#endif
-
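Note: sample_all above derives its cutoff by fully sorting every pairwise loss difference and reading off the n_pairs-th largest. A sketch of the same threshold computed with std::nth_element, which avoids the full sort (illustrative, not the original code):

#include <algorithm>
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

int main()
{
  std::vector<double> loss_diffs;
  loss_diffs.push_back(0.10);
  loss_diffs.push_back(0.50);
  loss_diffs.push_back(0.30);
  loss_diffs.push_back(0.20);

  const std::size_t n_pairs = 2;
  // index of the n_pairs-th largest element (or of the smallest
  // element if there are fewer candidates than n_pairs)
  std::size_t nth = std::min(n_pairs, loss_diffs.size()) - 1;
  std::nth_element(loss_diffs.begin(), loss_diffs.begin() + nth,
                   loss_diffs.end(), std::greater<double>());
  std::cout << "threshold: " << loss_diffs[nth] << std::endl; // 0.3
  return 0;
}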
diff --git a/dtrain/test/toy/dtrain.ini b/dtrain/test/toy/dtrain.ini
index abf22b94..a091732f 100644
--- a/dtrain/test/toy/dtrain.ini
+++ b/dtrain/test/toy/dtrain.ini
@@ -4,8 +4,8 @@ output=-
print_weights=logp shell_rule house_rule small_rule little_rule PassThrough
k=4
N=4
-epochs=3
-scorer=stupid_bleu
+epochs=2
+scorer=bleu
sample_from=kbest
filter=uniq
pair_sampling=all