author    Chris Dyer <cdyer@cs.cmu.edu>  2012-05-27 23:25:16 -0400
committer Chris Dyer <cdyer@cs.cmu.edu>  2012-05-27 23:25:16 -0400
commit    104aad02a868c1fc6320276d9b3b9b0e1f41f457 (patch)
tree      7bd9e092ed2d008024ae2834b3582c86c5394f7a
parent    fc936db02d42cc3978a4cc2017efe7a15c78855d (diff)
fix mapper to use common candidate set code
-rw-r--r--  pro-train/Makefile.am           6
-rw-r--r--  pro-train/mr_pro_map.cc       174
-rw-r--r--  training/Makefile.am           30
-rw-r--r--  training/candidate_set.cc     169
-rw-r--r--  training/candidate_set.h       53
-rw-r--r--  training/kbest_repository.cc   37
-rw-r--r--  training/kbest_repository.h    19
7 files changed, 251 insertions, 237 deletions
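
This commit factors the per-sentence k-best bookkeeping out of pro-train/mr_pro_map.cc into a reusable training::CandidateSet (new files training/candidate_set.h and training/candidate_set.cc), deletes the unused training/kbest_repository.{h,cc} stub, and rewires the Makefiles so that mr_pro_map and the training binaries link against a new convenience library, libtraining.a.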
diff --git a/pro-train/Makefile.am b/pro-train/Makefile.am
index a98dd245..1e9d46b0 100644
--- a/pro-train/Makefile.am
+++ b/pro-train/Makefile.am
@@ -2,12 +2,10 @@ bin_PROGRAMS = \
mr_pro_map \
mr_pro_reduce
-TESTS = lo_test
-
mr_pro_map_SOURCES = mr_pro_map.cc
-mr_pro_map_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a -lz
+mr_pro_map_LDADD = $(top_srcdir)/training/libtraining.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a -lz
mr_pro_reduce_SOURCES = mr_pro_reduce.cc
mr_pro_reduce_LDADD = $(top_srcdir)/training/liblbfgs/liblbfgs.a $(top_srcdir)/utils/libutils.a -lz
-AM_CPPFLAGS = -W -Wall -Wno-sign-compare $(GTEST_CPPFLAGS) -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval -I$(top_srcdir)/training
+AM_CPPFLAGS = -W -Wall -Wno-sign-compare -I$(top_srcdir)/utils -I$(top_srcdir)/decoder -I$(top_srcdir)/mteval -I$(top_srcdir)/training
diff --git a/pro-train/mr_pro_map.cc b/pro-train/mr_pro_map.cc
index 52b67f32..2aa0dc6f 100644
--- a/pro-train/mr_pro_map.cc
+++ b/pro-train/mr_pro_map.cc
@@ -9,14 +9,13 @@
#include <boost/program_options.hpp>
#include <boost/program_options/variables_map.hpp>
+#include "candidate_set.h"
#include "sampler.h"
#include "filelib.h"
#include "stringlib.h"
#include "weights.h"
#include "inside_outside.h"
#include "hg_io.h"
-#include "kbest.h"
-#include "viterbi.h"
#include "ns.h"
#include "ns_docscorer.h"
@@ -25,52 +24,6 @@
using namespace std;
namespace po = boost::program_options;
-struct ApproxVectorHasher {
- static const size_t MASK = 0xFFFFFFFFull;
- union UType {
- double f; // leave as double
- size_t i;
- };
- static inline double round(const double x) {
- UType t;
- t.f = x;
- size_t r = t.i & MASK;
- if ((r << 1) > MASK)
- t.i += MASK - r + 1;
- else
- t.i &= (1ull - MASK);
- return t.f;
- }
- size_t operator()(const SparseVector<weight_t>& x) const {
- size_t h = 0x573915839;
- for (SparseVector<weight_t>::const_iterator it = x.begin(); it != x.end(); ++it) {
- UType t;
- t.f = it->second;
- if (t.f) {
- size_t z = (t.i >> 32);
- boost::hash_combine(h, it->first);
- boost::hash_combine(h, z);
- }
- }
- return h;
- }
-};
-
-struct ApproxVectorEquals {
- bool operator()(const SparseVector<weight_t>& a, const SparseVector<weight_t>& b) const {
- SparseVector<weight_t>::const_iterator bit = b.begin();
- for (SparseVector<weight_t>::const_iterator ait = a.begin(); ait != a.end(); ++ait) {
- if (bit == b.end() ||
- ait->first != bit->first ||
- ApproxVectorHasher::round(ait->second) != ApproxVectorHasher::round(bit->second))
- return false;
- ++bit;
- }
- if (bit != b.end()) return false;
- return true;
- }
-};
-
boost::shared_ptr<MT19937> rng;
void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
@@ -105,107 +58,6 @@ void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
}
}
-struct HypInfo {
- HypInfo() : g_(-100.0f) {}
- HypInfo(const vector<WordID>& h, const SparseVector<weight_t>& feats) : hyp(h), g_(-100.0f), x(feats) {}
-
- // lazy evaluation
- double g(const SegmentEvaluator& scorer, const EvaluationMetric* metric) const {
- if (g_ == -100.0f) {
- SufficientStats ss;
- scorer.Evaluate(hyp, &ss);
- g_ = metric->ComputeScore(ss);
- }
- return g_;
- }
- vector<WordID> hyp;
- mutable float g_;
- SparseVector<weight_t> x;
-};
-
-struct HypInfoCompare {
- bool operator()(const HypInfo& a, const HypInfo& b) const {
- ApproxVectorEquals comp;
- return (a.hyp == b.hyp && comp(a.x,b.x));
- }
-};
-
-struct HypInfoHasher {
- size_t operator()(const HypInfo& x) const {
- boost::hash<vector<WordID> > hhasher;
- ApproxVectorHasher vhasher;
- size_t ha = hhasher(x.hyp);
- boost::hash_combine(ha, vhasher(x.x));
- return ha;
- }
-};
-
-void WriteKBest(const string& file, const vector<HypInfo>& kbest) {
- WriteFile wf(file);
- ostream& out = *wf.stream();
- out.precision(10);
- for (int i = 0; i < kbest.size(); ++i) {
- out << TD::GetString(kbest[i].hyp) << endl;
- out << kbest[i].x << endl;
- }
-}
-
-void ParseSparseVector(string& line, size_t cur, SparseVector<weight_t>* out) {
- SparseVector<weight_t>& x = *out;
- size_t last_start = cur;
- size_t last_comma = string::npos;
- while(cur <= line.size()) {
- if (line[cur] == ' ' || cur == line.size()) {
- if (!(cur > last_start && last_comma != string::npos && cur > last_comma)) {
- cerr << "[ERROR] " << line << endl << " position = " << cur << endl;
- exit(1);
- }
- const int fid = FD::Convert(line.substr(last_start, last_comma - last_start));
- if (cur < line.size()) line[cur] = 0;
- const double val = strtod(&line[last_comma + 1], NULL);
- x.set_value(fid, val);
-
- last_comma = string::npos;
- last_start = cur+1;
- } else {
- if (line[cur] == '=')
- last_comma = cur;
- }
- ++cur;
- }
-}
-
-void ReadKBest(const string& file, vector<HypInfo>* kbest) {
- cerr << "Reading from " << file << endl;
- ReadFile rf(file);
- istream& in = *rf.stream();
- string cand;
- string feats;
- while(getline(in, cand)) {
- getline(in, feats);
- assert(in);
- kbest->push_back(HypInfo());
- TD::ConvertSentence(cand, &kbest->back().hyp);
- ParseSparseVector(feats, 0, &kbest->back().x);
- }
- cerr << " read " << kbest->size() << " hypotheses\n";
-}
-
-void Dedup(vector<HypInfo>* h) {
- cerr << "Dedup in=" << h->size();
- tr1::unordered_set<HypInfo, HypInfoHasher, HypInfoCompare> u;
- while(h->size() > 0) {
- u.insert(h->back());
- h->pop_back();
- }
- tr1::unordered_set<HypInfo, HypInfoHasher, HypInfoCompare>::iterator it = u.begin();
- while (it != u.end()) {
- h->push_back(*it);
- it = u.erase(it);
- }
- cerr << " out=" << h->size() << endl;
-}
-
struct ThresholdAlpha {
explicit ThresholdAlpha(double t = 0.05) : threshold(t) {}
double operator()(double mag) const {
@@ -239,7 +91,7 @@ struct DiffOrder {
void Sample(const unsigned gamma,
const unsigned xi,
- const vector<HypInfo>& J_i,
+ const training::CandidateSet& J_i,
const SegmentEvaluator& scorer,
const EvaluationMetric* metric,
vector<TrainingInstance>* pv) {
@@ -257,10 +109,10 @@ void Sample(const unsigned gamma,
const float gdiff = fabs(ga - gb);
if (!gdiff) continue;
avg_diff += gdiff;
- SparseVector<weight_t> xdiff = (J_i[a].x - J_i[b].x).erase_zeros();
+ SparseVector<weight_t> xdiff = (J_i[a].fmap - J_i[b].fmap).erase_zeros();
if (xdiff.empty()) {
- cerr << "Empty diff:\n " << TD::GetString(J_i[a].hyp) << endl << "x=" << J_i[a].x << endl;
- cerr << " " << TD::GetString(J_i[b].hyp) << endl << "x=" << J_i[b].x << endl;
+ cerr << "Empty diff:\n " << TD::GetString(J_i[a].ewords) << endl << "x=" << J_i[a].fmap << endl;
+ cerr << " " << TD::GetString(J_i[b].ewords) << endl << "x=" << J_i[b].fmap << endl;
continue;
}
v1.push_back(TrainingInstance(xdiff, positive, gdiff));
@@ -328,23 +180,15 @@ int main(int argc, char** argv) {
is >> file >> sent_id;
ReadFile rf(file);
ostringstream os;
- vector<HypInfo> J_i;
+ training::CandidateSet J_i;
os << kbest_repo << "/kbest." << sent_id << ".txt.gz";
const string kbest_file = os.str();
if (FileExists(kbest_file))
- ReadKBest(kbest_file, &J_i);
+ J_i.ReadFromFile(kbest_file);
HypergraphIO::ReadFromJSON(rf.stream(), &hg);
hg.Reweight(weights);
- KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> kbest(hg, kbest_size);
-
- for (int i = 0; i < kbest_size; ++i) {
- const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal>::Derivation* d =
- kbest.LazyKthBest(hg.nodes_.size() - 1, i);
- if (!d) break;
- J_i.push_back(HypInfo(d->yield, d->feature_values));
- }
- Dedup(&J_i);
- WriteKBest(kbest_file, J_i);
+ J_i.AddKBestCandidates(hg, kbest_size);
+ J_i.WriteToFile(kbest_file);
Sample(gamma, xi, J_i, *ds[sent_id], metric, &v);
for (unsigned i = 0; i < v.size(); ++i) {
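
With the candidate-set logic moved behind the new class, the per-sentence body of the mapper collapses to a handful of calls, visible in the hunk above. A minimal sketch of the resulting flow (names as in the diff; option parsing, hypergraph I/O, and the output loop are elided):

  // Per-sentence candidate accumulation in the refactored mapper.
  // `hg` is the reweighted Hypergraph, `kbest_file` the cached k-best
  // path for this sentence, `kbest_size` the configured k-best cutoff.
  training::CandidateSet J_i;
  if (FileExists(kbest_file))
    J_i.ReadFromFile(kbest_file);         // merge candidates from earlier iterations
  J_i.AddKBestCandidates(hg, kbest_size); // extract fresh k-best, then dedup
  J_i.WriteToFile(kbest_file);            // persist the aggregated set
  Sample(gamma, xi, J_i, *ds[sent_id], metric, &v);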
diff --git a/training/Makefile.am b/training/Makefile.am
index 991ac210..8124b107 100644
--- a/training/Makefile.am
+++ b/training/Makefile.am
@@ -23,11 +23,17 @@ noinst_PROGRAMS = \
TESTS = lbfgs_test optimize_test
-mpi_online_optimize_SOURCES = mpi_online_optimize.cc online_optimizer.cc
-mpi_online_optimize_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
+noinst_LIBRARIES = libtraining.a
+libtraining_a_SOURCES = \
+ candidate_set.cc \
+ optimize.cc \
+ online_optimizer.cc
-mpi_flex_optimize_SOURCES = mpi_flex_optimize.cc online_optimizer.cc optimize.cc
-mpi_flex_optimize_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
+mpi_online_optimize_SOURCES = mpi_online_optimize.cc
+mpi_online_optimize_LDADD = libtraining.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
+
+mpi_flex_optimize_SOURCES = mpi_flex_optimize.cc
+mpi_flex_optimize_LDADD = libtraining.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
mpi_extract_reachable_SOURCES = mpi_extract_reachable.cc
mpi_extract_reachable_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
@@ -35,8 +41,8 @@ mpi_extract_reachable_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mtev
mpi_extract_features_SOURCES = mpi_extract_features.cc
mpi_extract_features_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
-mpi_batch_optimize_SOURCES = mpi_batch_optimize.cc optimize.cc
-mpi_batch_optimize_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
+mpi_batch_optimize_SOURCES = mpi_batch_optimize.cc
+mpi_batch_optimize_LDADD = libtraining.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
mpi_compute_cllh_SOURCES = mpi_compute_cllh.cc
mpi_compute_cllh_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
@@ -50,14 +56,14 @@ test_ngram_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteva
model1_SOURCES = model1.cc ttables.cc
model1_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
-lbl_model_SOURCES = lbl_model.cc optimize.cc
-lbl_model_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
+lbl_model_SOURCES = lbl_model.cc
+lbl_model_LDADD = libtraining.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
grammar_convert_SOURCES = grammar_convert.cc
grammar_convert_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
-optimize_test_SOURCES = optimize_test.cc optimize.cc online_optimizer.cc
-optimize_test_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
+optimize_test_SOURCES = optimize_test.cc
+optimize_test_LDADD = libtraining.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
collapse_weights_SOURCES = collapse_weights.cc
collapse_weights_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
@@ -65,8 +71,8 @@ collapse_weights_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/lib
lbfgs_test_SOURCES = lbfgs_test.cc
lbfgs_test_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
-mr_optimize_reduce_SOURCES = mr_optimize_reduce.cc optimize.cc
-mr_optimize_reduce_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
+mr_optimize_reduce_SOURCES = mr_optimize_reduce.cc
+mr_optimize_reduce_LDADD = libtraining.a $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
mr_em_map_adapter_SOURCES = mr_em_map_adapter.cc
mr_em_map_adapter_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/utils/libutils.a -lz
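
Note the link order in the new LDADD lines: libtraining.a is listed first because static linkers resolve symbols left to right, so the convenience library must precede libcdec.a, libmteval.a, and libutils.a, on which its objects depend.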
diff --git a/training/candidate_set.cc b/training/candidate_set.cc
new file mode 100644
index 00000000..5ab4558a
--- /dev/null
+++ b/training/candidate_set.cc
@@ -0,0 +1,169 @@
+#include "candidate_set.h"
+
+#include <tr1/unordered_set>
+
+#include <boost/functional/hash.hpp>
+
+#include "ns.h"
+#include "filelib.h"
+#include "wordid.h"
+#include "tdict.h"
+#include "hg.h"
+#include "kbest.h"
+#include "viterbi.h"
+
+using namespace std;
+
+namespace training {
+
+struct ApproxVectorHasher {
+ static const size_t MASK = 0xFFFFFFFFull;
+ union UType {
+ double f; // leave as double
+ size_t i;
+ };
+ static inline double round(const double x) {
+ UType t;
+ t.f = x;
+ size_t r = t.i & MASK;
+ if ((r << 1) > MASK)
+ t.i += MASK - r + 1;
+ else
+ t.i &= (1ull - MASK);
+ return t.f;
+ }
+ size_t operator()(const SparseVector<double>& x) const {
+ size_t h = 0x573915839;
+ for (SparseVector<double>::const_iterator it = x.begin(); it != x.end(); ++it) {
+ UType t;
+ t.f = it->second;
+ if (t.f) {
+ size_t z = (t.i >> 32);
+ boost::hash_combine(h, it->first);
+ boost::hash_combine(h, z);
+ }
+ }
+ return h;
+ }
+};
+
+struct ApproxVectorEquals {
+ bool operator()(const SparseVector<double>& a, const SparseVector<double>& b) const {
+ SparseVector<double>::const_iterator bit = b.begin();
+ for (SparseVector<double>::const_iterator ait = a.begin(); ait != a.end(); ++ait) {
+ if (bit == b.end() ||
+ ait->first != bit->first ||
+ ApproxVectorHasher::round(ait->second) != ApproxVectorHasher::round(bit->second))
+ return false;
+ ++bit;
+ }
+ if (bit != b.end()) return false;
+ return true;
+ }
+};
+
+double Candidate::g(const SegmentEvaluator& scorer, const EvaluationMetric* metric) const {
+ if (g_ == -100.0f) {
+ SufficientStats ss;
+ scorer.Evaluate(ewords, &ss);
+ g_ = metric->ComputeScore(ss);
+ }
+ return g_;
+}
+
+struct CandidateCompare {
+ bool operator()(const Candidate& a, const Candidate& b) const {
+ ApproxVectorEquals eq;
+ return (a.ewords == b.ewords && eq(a.fmap,b.fmap));
+ }
+};
+
+struct CandidateHasher {
+ size_t operator()(const Candidate& x) const {
+ boost::hash<vector<WordID> > hhasher;
+ ApproxVectorHasher vhasher;
+ size_t ha = hhasher(x.ewords);
+ boost::hash_combine(ha, vhasher(x.fmap));
+ return ha;
+ }
+};
+
+void CandidateSet::WriteToFile(const string& file) const {
+ WriteFile wf(file);
+ ostream& out = *wf.stream();
+ out.precision(10);
+ for (unsigned i = 0; i < cs.size(); ++i) {
+ out << TD::GetString(cs[i].ewords) << endl;
+ out << cs[i].fmap << endl;
+ }
+}
+
+static void ParseSparseVector(string& line, size_t cur, SparseVector<double>* out) {
+ SparseVector<double>& x = *out;
+ size_t last_start = cur;
+ size_t last_comma = string::npos;
+ while(cur <= line.size()) {
+ if (line[cur] == ' ' || cur == line.size()) {
+ if (!(cur > last_start && last_comma != string::npos && cur > last_comma)) {
+ cerr << "[ERROR] " << line << endl << " position = " << cur << endl;
+ exit(1);
+ }
+ const int fid = FD::Convert(line.substr(last_start, last_comma - last_start));
+ if (cur < line.size()) line[cur] = 0;
+ const double val = strtod(&line[last_comma + 1], NULL);
+ x.set_value(fid, val);
+
+ last_comma = string::npos;
+ last_start = cur+1;
+ } else {
+ if (line[cur] == '=')
+ last_comma = cur;
+ }
+ ++cur;
+ }
+}
+
+void CandidateSet::ReadFromFile(const string& file) {
+ cerr << "Reading candidates from " << file << endl;
+ ReadFile rf(file);
+ istream& in = *rf.stream();
+ string cand;
+ string feats;
+ while(getline(in, cand)) {
+ getline(in, feats);
+ assert(in);
+ cs.push_back(Candidate());
+ TD::ConvertSentence(cand, &cs.back().ewords);
+ ParseSparseVector(feats, 0, &cs.back().fmap);
+ }
+ cerr << " read " << cs.size() << " candidates\n";
+}
+
+void CandidateSet::Dedup() {
+ cerr << "Dedup in=" << cs.size();
+ tr1::unordered_set<Candidate, CandidateHasher, CandidateCompare> u;
+ while(cs.size() > 0) {
+ u.insert(cs.back());
+ cs.pop_back();
+ }
+ tr1::unordered_set<Candidate, CandidateHasher, CandidateCompare>::iterator it = u.begin();
+ while (it != u.end()) {
+ cs.push_back(*it);
+ it = u.erase(it);
+ }
+ cerr << " out=" << cs.size() << endl;
+}
+
+void CandidateSet::AddKBestCandidates(const Hypergraph& hg, size_t kbest_size) {
+ KBest::KBestDerivations<vector<WordID>, ESentenceTraversal> kbest(hg, kbest_size);
+
+ for (unsigned i = 0; i < kbest_size; ++i) {
+ const KBest::KBestDerivations<vector<WordID>, ESentenceTraversal>::Derivation* d =
+ kbest.LazyKthBest(hg.nodes_.size() - 1, i);
+ if (!d) break;
+ cs.push_back(Candidate(d->yield, d->feature_values));
+ }
+ Dedup();
+}
+
+}
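
ApproxVectorHasher keys each feature value on the high 32 bits of its IEEE-754 bit pattern, and round() snaps a double to that truncated space, so two feature vectors that differ only in low-order floating-point noise hash and compare as equal in ApproxVectorEquals. A self-contained sketch of the same rounding idea, using memcpy instead of the union to stay within defined behavior (illustrative only, not the repository's code):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  // Keep only the high 32 bits of a double's bit pattern, rounding
  // (not just truncating) away the low half.
  static double approx_round(double x) {
    uint64_t i;
    std::memcpy(&i, &x, sizeof i);
    const uint64_t lo = i & 0xFFFFFFFFull;
    if (lo >= 0x80000000ull)
      i += 0x100000000ull - lo;   // round up: carry into the high half
    else
      i &= ~0xFFFFFFFFull;        // round down: clear the low half
    double r;
    std::memcpy(&r, &i, sizeof r);
    return r;
  }

  int main() {
    const double a = 0.1 + 0.2;  // 0.30000000000000004: low-order noise
    const double b = 0.3;
    std::printf("%d\n", approx_round(a) == approx_round(b));  // prints 1
    return 0;
  }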
diff --git a/training/candidate_set.h b/training/candidate_set.h
new file mode 100644
index 00000000..e2b0b1ba
--- /dev/null
+++ b/training/candidate_set.h
@@ -0,0 +1,53 @@
+#ifndef _CANDIDATE_SET_H_
+#define _CANDIDATE_SET_H_
+
+#include <vector>
+#include <algorithm>
+
+#include "wordid.h"
+#include "sparse_vector.h"
+
+class Hypergraph;
+struct SegmentEvaluator;
+struct EvaluationMetric;
+
+namespace training {
+
+struct Candidate {
+ Candidate() : g_(-100.0f) {}
+ Candidate(const std::vector<WordID>& e, const SparseVector<double>& fm) : ewords(e), fmap(fm), g_(-100.0f) {}
+ std::vector<WordID> ewords;
+ SparseVector<double> fmap;
+ double g(const SegmentEvaluator& scorer, const EvaluationMetric* metric) const;
+ void swap(Candidate& other) {
+ std::swap(g_, other.g_);
+ ewords.swap(other.ewords);
+ fmap.swap(other.fmap);
+ }
+ private:
+ mutable float g_;
+ //SufficientStats score_stats;
+};
+
+// represents some kind of collection of translation candidates, e.g.
+// aggregated k-best lists, sample lists, etc.
+class CandidateSet {
+ public:
+ CandidateSet() {}
+ inline size_t size() const { return cs.size(); }
+ const Candidate& operator[](size_t i) const { return cs[i]; }
+
+ void ReadFromFile(const std::string& file);
+ void WriteToFile(const std::string& file) const;
+ void AddKBestCandidates(const Hypergraph& hg, size_t kbest_size);
+ // TODO add code to do unique k-best
+ // TODO add code to draw k samples
+
+ private:
+ void Dedup();
+ std::vector<Candidate> cs;
+};
+
+}
+
+#endif
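
The header keeps the scoring state private: Candidate::g() computes the metric score lazily on first use (the -100.0f sentinel marks "not yet scored") and caches it in the mutable g_ field, so candidates held by const reference can still be scored. A hedged usage sketch (the SegmentEvaluator/EvaluationMetric setup from ns.h/ns_docscorer.h is elided; the file name and k value are chosen for illustration):

  // Aggregate candidates for one sentence, then score them.
  training::CandidateSet cands;
  cands.ReadFromFile("kbest.12.txt.gz");  // format: two lines per candidate,
                                          // the hypothesis, then Name=value pairs
  cands.AddKBestCandidates(hg, 500);      // merge fresh k-best and dedup
  cands.WriteToFile("kbest.12.txt.gz");

  for (size_t i = 0; i < cands.size(); ++i) {
    const training::Candidate& c = cands[i];
    const double score = c.g(scorer, metric);  // lazily evaluated, then cached
    // ... build training pairs from (c.fmap, score), as Sample() does ...
  }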
diff --git a/training/kbest_repository.cc b/training/kbest_repository.cc
deleted file mode 100644
index 145b40a2..00000000
--- a/training/kbest_repository.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-#include "kbest_repository.h"
-
-#include <boost/functional/hash.hpp>
-
-using namespace std;
-
-struct ApproxVectorHasher {
- static const size_t MASK = 0xFFFFFFFFull;
- union UType {
- double f; // leave as double
- size_t i;
- };
- static inline double round(const double x) {
- UType t;
- t.f = x;
- size_t r = t.i & MASK;
- if ((r << 1) > MASK)
- t.i += MASK - r + 1;
- else
- t.i &= (1ull - MASK);
- return t.f;
- }
- size_t operator()(const SparseVector<double>& x) const {
- size_t h = 0x573915839;
- for (SparseVector<double>::const_iterator it = x.begin(); it != x.end(); ++it) {
- UType t;
- t.f = it->second;
- if (t.f) {
- size_t z = (t.i >> 32);
- boost::hash_combine(h, it->first);
- boost::hash_combine(h, z);
- }
- }
- return h;
- }
-};
-
diff --git a/training/kbest_repository.h b/training/kbest_repository.h
deleted file mode 100644
index 0345394a..00000000
--- a/training/kbest_repository.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _KBEST_REPOSITORY_H_
-#define _KBEST_REPOSITORY_H_
-
-#include <vector>
-#include "wordid.h"
-#include "ns.h"
-#include "sparse_vector.h"
-
-class KBestRepository {
- struct HypInfo {
- std::vector<WordID> words;
- SparseVector<double> x;
- SufficientStats score_stats;
- };
-
- std::vector<HypInfo> candidates;
-};
-
-#endif