author    Chris Dyer <redpony@gmail.com>  2010-02-18 17:06:59 -0500
committer Chris Dyer <redpony@gmail.com>  2010-02-18 17:06:59 -0500
commit    4d47dbd7da0434de67ac619392d516c678e1f2ca (patch)
tree      fdb327696aa30e79983602c0e7d5fde372efbde5 /training
parent    c97b8a8b58f7385fb48b74e2cf1ea9610cd1202f (diff)
add generative word alignment model and primitive EM trainer. Model 1 and HMM are supported, without NULL source words
Diffstat (limited to 'training')
-rw-r--r--  training/mr_em_adapted_reduce.cc   194
-rw-r--r--  training/mr_em_map_adapter.cc      160
-rw-r--r--  training/mr_em_train.cc            270
-rw-r--r--  training/mr_reduce_to_weights.cc   109
4 files changed, 463 insertions, 270 deletions
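
Background for the diffs below (a sketch, stated for reference; the alignment models themselves are implemented outside this diff): with no NULL source word, the IBM Model 1 E-step posterior for aligning target word f_j to source word e_i, and the expected count it contributes, are

$$p(a_j = i \mid \mathbf{f}, \mathbf{e}) = \frac{t(f_j \mid e_i)}{\sum_{i'=1}^{|\mathbf{e}|} t(f_j \mid e_{i'})}, \qquad c(f_j \mid e_i) \mathrel{+}= p(a_j = i \mid \mathbf{f}, \mathbf{e}).$$

The decoder emits these expected counts as feature values; mr_em_map_adapter groups them by conditioning event, and mr_em_adapted_reduce renormalizes each group into updated parameters t(f | e) (the M-step).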
diff --git a/training/mr_em_adapted_reduce.cc b/training/mr_em_adapted_reduce.cc
new file mode 100644
index 00000000..52387e7f
--- /dev/null
+++ b/training/mr_em_adapted_reduce.cc
@@ -0,0 +1,194 @@
+#include <iostream>
+#include <vector>
+#include <cassert>
+#include <cmath>
+
+#include <boost/program_options.hpp>
+#include <boost/program_options/variables_map.hpp>
+
+#include "config.h"
+#ifdef HAVE_BOOST_DIGAMMA
+#include <boost/math/special_functions/digamma.hpp>
+using boost::math::digamma;
+#endif
+
+#include "filelib.h"
+#include "fdict.h"
+#include "weights.h"
+#include "sparse_vector.h"
+
+using namespace std;
+namespace po = boost::program_options;
+
+#ifndef HAVE_BOOST_DIGAMMA
+#warning Using Mark Johnson's digamma()
+double digamma(double x) {
+ double result = 0, xx, xx2, xx4;
+ assert(x > 0);
+ for ( ; x < 7; ++x)
+ result -= 1/x;
+ x -= 1.0/2.0;
+ xx = 1.0/x;
+ xx2 = xx*xx;
+ xx4 = xx2*xx2;
+ result += log(x)+(1./24.)*xx2-(7.0/960.0)*xx4+(31.0/8064.0)*xx4*xx2-(127.0/30720.0)*xx4*xx4;
+ return result;
+}
+#endif
+
+void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
+ po::options_description opts("Configuration options");
+ opts.add_options()
+ ("optimization_method,m", po::value<string>()->default_value("em"), "Optimization method (em, vb)")
+ ("input_format,f",po::value<string>()->default_value("b64"),"Encoding of the input (b64 or text)");
+ po::options_description clo("Command line options");
+ clo.add_options()
+ ("config", po::value<string>(), "Configuration file")
+ ("help,h", "Print this help message and exit");
+ po::options_description dconfig_options, dcmdline_options;
+ dconfig_options.add(opts);
+ dcmdline_options.add(opts).add(clo);
+
+ po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
+ if (conf->count("config")) {
+ ifstream config((*conf)["config"].as<string>().c_str());
+ po::store(po::parse_config_file(config, dconfig_options), *conf);
+ }
+ po::notify(*conf);
+
+ if (conf->count("help")) {
+ cerr << dcmdline_options << endl;
+ exit(1);
+ }
+}
+
+double NoZero(const double& x) {
+ if (x) return x;
+ return 1e-35;
+}
+
+void Maximize(const bool use_vb,
+ const double& alpha,
+ const int total_event_types,
+ SparseVector<double>* pc) {
+ const SparseVector<double>& counts = *pc;
+
+ if (use_vb)
+ assert(total_event_types >= counts.num_active());
+
+ double tot = 0;
+ for (SparseVector<double>::const_iterator it = counts.begin();
+ it != counts.end(); ++it)
+ tot += it->second;
+// cerr << " = " << tot << endl;
+ assert(tot > 0.0);
+ double ltot = log(tot);
+ if (use_vb)
+ ltot = digamma(tot + total_event_types * alpha);
+ for (SparseVector<double>::const_iterator it = counts.begin();
+ it != counts.end(); ++it) {
+ if (use_vb) {
+ pc->set_value(it->first, NoZero(digamma(it->second + alpha) - ltot));
+ } else {
+ pc->set_value(it->first, NoZero(log(it->second) - ltot));
+ }
+ }
+#if 0
+ if (counts.num_active() < 50) {
+ for (SparseVector<double>::const_iterator it = counts.begin();
+ it != counts.end(); ++it) {
+ cerr << " p(" << FD::Convert(it->first) << ")=" << exp(it->second);
+ }
+ cerr << endl;
+ }
+#endif
+}
+
+int main(int argc, char** argv) {
+ po::variables_map conf;
+ InitCommandLine(argc, argv, &conf);
+
+ const bool use_b64 = conf["input_format"].as<string>() == "b64";
+ const bool use_vb = conf["optimization_method"].as<string>() == "vb";
+ const double alpha = 1e-09;
+ if (use_vb)
+ cerr << "Using variational Bayes, make sure alphas are set\n";
+
+ const string s_obj = "**OBJ**";
+ // E-step
+ string cur_key = "";
+ SparseVector<double> acc;
+ double logprob = 0;
+ while(cin) {
+ string line;
+ getline(cin, line);
+ if (line.empty()) continue;
+ int feat;
+ double val;
+ size_t i = line.find("\t");
+    assert(i != string::npos);
+    const string key = line.substr(0, i);
+ ++i;
+ if (key != cur_key) {
+ if (cur_key.size() > 0) {
+ // TODO shouldn't be num_active, should be total number
+ // of events
+ Maximize(use_vb, alpha, acc.num_active(), &acc);
+ cout << cur_key << '\t';
+ if (use_b64)
+ B64::Encode(0.0, acc, &cout);
+ else
+ cout << acc;
+ cout << endl;
+ acc.clear();
+ }
+ cur_key = key;
+ }
+ if (use_b64) {
+ SparseVector<double> g;
+ double obj;
+ if (!B64::Decode(&obj, &g, &line[i], line.size() - i)) {
+ cerr << "B64 decoder returned error, skipping!\n";
+ continue;
+ }
+ logprob += obj;
+ acc += g;
+ } else { // text encoding - your counts will not be accurate!
+ while (i < line.size()) {
+ size_t start = i;
+      while (i < line.size() && line[i] != '=') ++i;
+ if (i == line.size()) { cerr << "FORMAT ERROR\n"; break; }
+ string fname = line.substr(start, i - start);
+ if (fname == s_obj) {
+ feat = -1;
+ } else {
+ feat = FD::Convert(line.substr(start, i - start));
+ }
+ ++i;
+ start = i;
+      while (i < line.size() && line[i] != ';') ++i;
+ if (i - start == 0) continue;
+ val = atof(line.substr(start, i - start).c_str());
+ ++i;
+ if (feat == -1) {
+ logprob += val;
+ } else {
+ acc.add_value(feat, val);
+ }
+ }
+ }
+ }
+ // TODO shouldn't be num_active, should be total number
+ // of events
+ Maximize(use_vb, alpha, acc.num_active(), &acc);
+ cout << cur_key << '\t';
+ if (use_b64)
+ B64::Encode(0.0, acc, &cout);
+ else
+ cout << acc;
+ cout << endl << flush;
+
+ cerr << "LOGPROB: " << logprob << endl;
+
+ return 0;
+}
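
The Maximize() above overwrites each count with a log-probability. With plain EM the per-multinomial update is $\log \theta_k = \log c_k - \log \sum_{k'} c_{k'}$; with --optimization_method vb it is the standard mean-field variational Bayes update for a multinomial under a symmetric Dirichlet(alpha) prior over K event types,

$$\log \theta_k = \psi(c_k + \alpha) - \psi\Big(\sum_{k'} c_{k'} + K\alpha\Big),$$

where psi is the digamma function, matching the digamma(it->second + alpha) - ltot line above. As the TODO notes, K is approximated by num_active(), the number of events observed with nonzero count, which understates the true number of event types and hence the strength of the prior.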
diff --git a/training/mr_em_map_adapter.cc b/training/mr_em_map_adapter.cc
new file mode 100644
index 00000000..a98e1b77
--- /dev/null
+++ b/training/mr_em_map_adapter.cc
@@ -0,0 +1,160 @@
+#include <iostream>
+#include <fstream>
+#include <cassert>
+#include <cmath>
+
+#include <boost/utility.hpp>
+#include <boost/program_options.hpp>
+#include <boost/program_options/variables_map.hpp>
+#include "boost/tuple/tuple.hpp"
+
+#include "fdict.h"
+#include "sparse_vector.h"
+
+using namespace std;
+namespace po = boost::program_options;
+
+// useful for EM models parameterized by a bunch of multinomials
+// this converts event counts (returned from cdec as feature expectations)
+// into different keys and values (which are lists of all the events,
+// conditioned on the key) for summing and normalization by a reducer
+
+void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
+ po::options_description opts("Configuration options");
+ opts.add_options()
+ ("buffer_size,b", po::value<int>()->default_value(1), "Buffer size (in # of counts) before emitting counts")
+ ("format,f",po::value<string>()->default_value("b64"), "Encoding of the input (b64 or text)");
+ po::options_description clo("Command line options");
+ clo.add_options()
+ ("config", po::value<string>(), "Configuration file")
+ ("help,h", "Print this help message and exit");
+ po::options_description dconfig_options, dcmdline_options;
+ dconfig_options.add(opts);
+ dcmdline_options.add(opts).add(clo);
+
+ po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
+ if (conf->count("config")) {
+ ifstream config((*conf)["config"].as<string>().c_str());
+ po::store(po::parse_config_file(config, dconfig_options), *conf);
+ }
+ po::notify(*conf);
+
+ if (conf->count("help")) {
+ cerr << dcmdline_options << endl;
+ exit(1);
+ }
+}
+
+struct EventMapper {
+ int Map(int fid) {
+ int& cv = map_[fid];
+ if (!cv) {
+ cv = GetConditioningVariable(fid);
+ }
+ return cv;
+ }
+ void Clear() { map_.clear(); }
+ protected:
+ virtual int GetConditioningVariable(int fid) const = 0;
+ private:
+ map<int, int> map_;
+};
+
+struct LexAlignEventMapper : public EventMapper {
+ protected:
+ virtual int GetConditioningVariable(int fid) const {
+ const string& str = FD::Convert(fid);
+ size_t pos = str.rfind("_");
+ if (pos == string::npos || pos == 0 || pos >= str.size() - 1) {
+ cerr << "Bad feature for EM adapter: " << str << endl;
+ abort();
+ }
+ return FD::Convert(str.substr(0, pos));
+ }
+};
+
+int main(int argc, char** argv) {
+ po::variables_map conf;
+ InitCommandLine(argc, argv, &conf);
+
+ const bool use_b64 = conf["format"].as<string>() == "b64";
+ const int buffer_size = conf["buffer_size"].as<int>();
+
+ const string s_obj = "**OBJ**";
+ // 0<TAB>**OBJ**=12.2;Feat1=2.3;Feat2=-0.2;
+ // 0<TAB>**OBJ**=1.1;Feat1=1.0;
+
+ EventMapper* event_mapper = new LexAlignEventMapper;
+ map<int, SparseVector<double> > counts;
+ size_t total = 0;
+ while(cin) {
+ string line;
+ getline(cin, line);
+ if (line.empty()) continue;
+ int feat;
+ double val;
+ size_t i = line.find("\t");
+ assert(i != string::npos);
+ ++i;
+ SparseVector<double> g;
+ double obj = 0;
+ if (use_b64) {
+ if (!B64::Decode(&obj, &g, &line[i], line.size() - i)) {
+ cerr << "B64 decoder returned error, skipping!\n";
+ continue;
+ }
+ } else { // text encoding - your counts will not be accurate!
+ while (i < line.size()) {
+ size_t start = i;
+        while (i < line.size() && line[i] != '=') ++i;
+ if (i == line.size()) { cerr << "FORMAT ERROR\n"; break; }
+ string fname = line.substr(start, i - start);
+ if (fname == s_obj) {
+ feat = -1;
+ } else {
+ feat = FD::Convert(line.substr(start, i - start));
+ }
+ ++i;
+ start = i;
+        while (i < line.size() && line[i] != ';') ++i;
+ if (i - start == 0) continue;
+ val = atof(line.substr(start, i - start).c_str());
+ ++i;
+ if (feat == -1) {
+ obj = val;
+ } else {
+ g.set_value(feat, val);
+ }
+ }
+ }
+ //cerr << "OBJ: " << obj << endl;
+ const SparseVector<double>& cg = g;
+ for (SparseVector<double>::const_iterator it = cg.begin(); it != cg.end(); ++it) {
+ const int cond_var = event_mapper->Map(it->first);
+ SparseVector<double>& cond_counts = counts[cond_var];
+ int delta = cond_counts.num_active();
+ cond_counts.add_value(it->first, it->second);
+ delta = cond_counts.num_active() - delta;
+ total += delta;
+ }
+ if (total > buffer_size) {
+ for (map<int, SparseVector<double> >::iterator it = counts.begin();
+ it != counts.end(); ++it) {
+ const SparseVector<double>& cc = it->second;
+ cout << FD::Convert(it->first) << '\t';
+ if (use_b64) {
+ B64::Encode(0.0, cc, &cout);
+ } else {
+ abort();
+ }
+ cout << endl;
+ }
+ cout << flush;
+ total = 0;
+ counts.clear();
+ }
+ }
+
+ return 0;
+}
+
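
A minimal standalone sketch of the key derivation in LexAlignEventMapper (the feature name below is hypothetical; real names come from the decoder's lexical alignment features): everything before the last '_' names the conditioning multinomial and becomes the reducer key.

#include <cassert>
#include <iostream>
#include <string>

// Mirrors GetConditioningVariable(): the prefix before the last '_' names
// the multinomial (conditioning event); the suffix names the generated event.
std::string ConditioningKey(const std::string& feat) {
  size_t pos = feat.rfind('_');
  assert(pos != std::string::npos && pos != 0 && pos < feat.size() - 1);
  return feat.substr(0, pos);
}

int main() {
  // Hypothetical feature "gen_dog_chien": its expected count is emitted
  // under key "gen_dog" and later normalized against its siblings.
  std::cout << ConditioningKey("gen_dog_chien") << std::endl;  // prints gen_dog
  return 0;
}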
diff --git a/training/mr_em_train.cc b/training/mr_em_train.cc
deleted file mode 100644
index a15fbe4c..00000000
--- a/training/mr_em_train.cc
+++ /dev/null
@@ -1,270 +0,0 @@
-#include <iostream>
-#include <vector>
-#include <cassert>
-#include <cmath>
-
-#include <boost/program_options.hpp>
-#include <boost/program_options/variables_map.hpp>
-
-#include "config.h"
-#ifdef HAVE_BOOST_DIGAMMA
-#include <boost/math/special_functions/digamma.hpp>
-using boost::math::digamma;
-#endif
-
-#include "tdict.h"
-#include "filelib.h"
-#include "trule.h"
-#include "fdict.h"
-#include "weights.h"
-#include "sparse_vector.h"
-
-using namespace std;
-using boost::shared_ptr;
-namespace po = boost::program_options;
-
-#ifndef HAVE_BOOST_DIGAMMA
-#warning Using Mark Johnson's digamma()
-double digamma(double x) {
- double result = 0, xx, xx2, xx4;
- assert(x > 0);
- for ( ; x < 7; ++x)
- result -= 1/x;
- x -= 1.0/2.0;
- xx = 1.0/x;
- xx2 = xx*xx;
- xx4 = xx2*xx2;
- result += log(x)+(1./24.)*xx2-(7.0/960.0)*xx4+(31.0/8064.0)*xx4*xx2-(127.0/30720.0)*xx4*xx4;
- return result;
-}
-#endif
-
-void SanityCheck(const vector<double>& w) {
- for (int i = 0; i < w.size(); ++i) {
- assert(!isnan(w[i]));
- }
-}
-
-struct FComp {
- const vector<double>& w_;
- FComp(const vector<double>& w) : w_(w) {}
- bool operator()(int a, int b) const {
- return w_[a] > w_[b];
- }
-};
-
-void ShowLargestFeatures(const vector<double>& w) {
- vector<int> fnums(w.size() - 1);
- for (int i = 1; i < w.size(); ++i)
- fnums[i-1] = i;
- vector<int>::iterator mid = fnums.begin();
- mid += (w.size() > 10 ? 10 : w.size()) - 1;
- partial_sort(fnums.begin(), mid, fnums.end(), FComp(w));
- cerr << "MOST PROBABLE:";
- for (vector<int>::iterator i = fnums.begin(); i != mid; ++i) {
- cerr << ' ' << FD::Convert(*i) << '=' << w[*i];
- }
- cerr << endl;
-}
-
-void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
- po::options_description opts("Configuration options");
- opts.add_options()
- ("output,o",po::value<string>()->default_value("-"),"Output log probs file")
- ("grammar,g",po::value<vector<string> >()->composing(),"SCFG grammar file(s)")
- ("optimization_method,m", po::value<string>()->default_value("em"), "Optimization method (em, vb)")
- ("input_format,f",po::value<string>()->default_value("b64"),"Encoding of the input (b64 or text)");
- po::options_description clo("Command line options");
- clo.add_options()
- ("config", po::value<string>(), "Configuration file")
- ("help,h", "Print this help message and exit");
- po::options_description dconfig_options, dcmdline_options;
- dconfig_options.add(opts);
- dcmdline_options.add(opts).add(clo);
-
- po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
- if (conf->count("config")) {
- ifstream config((*conf)["config"].as<string>().c_str());
- po::store(po::parse_config_file(config, dconfig_options), *conf);
- }
- po::notify(*conf);
-
- if (conf->count("help") || !conf->count("grammar")) {
- cerr << dcmdline_options << endl;
- exit(1);
- }
-}
-
-// describes a multinomial or multinomial with a prior
-// does not contain the parameters- just the list of events
-// and any hyperparameters
-struct MultinomialInfo {
- MultinomialInfo() : alpha(1.0) {}
- vector<int> events; // the events that this multinomial generates
- double alpha; // hyperparameter for (optional) Dirichlet prior
-};
-
-typedef map<WordID, MultinomialInfo> ModelDefinition;
-
-void LoadModelEvents(const po::variables_map& conf, ModelDefinition* pm) {
- ModelDefinition& m = *pm;
- m.clear();
- vector<string> gfiles = conf["grammar"].as<vector<string> >();
- for (int i = 0; i < gfiles.size(); ++i) {
- ReadFile rf(gfiles[i]);
- istream& in = *rf.stream();
- int lc = 0;
- while(in) {
- string line;
- getline(in, line);
- if (line.empty()) continue;
- ++lc;
- TRule r(line, true);
- const SparseVector<double>& f = r.GetFeatureValues();
- if (f.num_active() == 0) {
- cerr << "[WARNING] no feature found in " << gfiles[i] << ':' << lc << endl;
- continue;
- }
- if (f.num_active() > 1) {
- cerr << "[ERROR] more than one feature found in " << gfiles[i] << ':' << lc << endl;
- exit(1);
- }
- SparseVector<double>::const_iterator it = f.begin();
- if (it->second != 1.0) {
- cerr << "[ERROR] feature with value != 1 found in " << gfiles[i] << ':' << lc << endl;
- exit(1);
- }
- m[r.GetLHS()].events.push_back(it->first);
- }
- }
- for (ModelDefinition::iterator it = m.begin(); it != m.end(); ++it) {
- const vector<int>& v = it->second.events;
- cerr << "Multinomial [" << TD::Convert(it->first*-1) << "]\n";
- if (v.size() < 1000) {
- cerr << " generates:";
- for (int i = 0; i < v.size(); ++i) {
- cerr << " " << FD::Convert(v[i]);
- }
- cerr << endl;
- }
- }
-}
-
-void Maximize(const ModelDefinition& m, const bool use_vb, vector<double>* counts) {
- for (ModelDefinition::const_iterator it = m.begin(); it != m.end(); ++it) {
- const MultinomialInfo& mult_info = it->second;
- const vector<int>& events = mult_info.events;
- cerr << "Multinomial [" << TD::Convert(it->first*-1) << "]";
- double tot = 0;
- for (int i = 0; i < events.size(); ++i)
- tot += (*counts)[events[i]];
- cerr << " = " << tot << endl;
- assert(tot > 0.0);
- double ltot = log(tot);
- if (use_vb)
- ltot = digamma(tot + events.size() * mult_info.alpha);
- for (int i = 0; i < events.size(); ++i) {
- if (use_vb) {
- (*counts)[events[i]] = digamma((*counts)[events[i]] + mult_info.alpha) - ltot;
- } else {
- (*counts)[events[i]] = log((*counts)[events[i]]) - ltot;
- }
- }
- if (events.size() < 50) {
- for (int i = 0; i < events.size(); ++i) {
- cerr << " p(" << FD::Convert(events[i]) << ")=" << exp((*counts)[events[i]]);
- }
- cerr << endl;
- }
- }
-}
-
-int main(int argc, char** argv) {
- po::variables_map conf;
- InitCommandLine(argc, argv, &conf);
-
- const bool use_b64 = conf["input_format"].as<string>() == "b64";
- const bool use_vb = conf["optimization_method"].as<string>() == "vb";
- if (use_vb)
- cerr << "Using variational Bayes, make sure alphas are set\n";
-
- ModelDefinition model_def;
- LoadModelEvents(conf, &model_def);
-
- const string s_obj = "**OBJ**";
- int num_feats = FD::NumFeats();
- cerr << "Number of features: " << num_feats << endl;
-
- vector<double> counts(num_feats, 0);
- double logprob = 0;
- // 0<TAB>**OBJ**=12.2;Feat1=2.3;Feat2=-0.2;
- // 0<TAB>**OBJ**=1.1;Feat1=1.0;
-
- // E-step
- while(cin) {
- string line;
- getline(cin, line);
- if (line.empty()) continue;
- int feat;
- double val;
- size_t i = line.find("\t");
- assert(i != string::npos);
- ++i;
- if (use_b64) {
- SparseVector<double> g;
- double obj;
- if (!B64::Decode(&obj, &g, &line[i], line.size() - i)) {
- cerr << "B64 decoder returned error, skipping!\n";
- continue;
- }
- logprob += obj;
- const SparseVector<double>& cg = g;
- for (SparseVector<double>::const_iterator it = cg.begin(); it != cg.end(); ++it) {
- if (it->first >= num_feats) {
- cerr << "Unexpected feature: " << FD::Convert(it->first) << endl;
- abort();
- }
- counts[it->first] += it->second;
- }
- } else { // text encoding - your counts will not be accurate!
- while (i < line.size()) {
- size_t start = i;
- while (line[i] != '=' && i < line.size()) ++i;
- if (i == line.size()) { cerr << "FORMAT ERROR\n"; break; }
- string fname = line.substr(start, i - start);
- if (fname == s_obj) {
- feat = -1;
- } else {
- feat = FD::Convert(line.substr(start, i - start));
- if (feat >= num_feats) {
- cerr << "Unexpected feature: " << line.substr(start, i - start) << endl;
- abort();
- }
- }
- ++i;
- start = i;
- while (line[i] != ';' && i < line.size()) ++i;
- if (i - start == 0) continue;
- val = atof(line.substr(start, i - start).c_str());
- ++i;
- if (feat == -1) {
- logprob += val;
- } else {
- counts[feat] += val;
- }
- }
- }
- }
-
- cerr << "LOGPROB: " << logprob << endl;
- // M-step
- Maximize(model_def, use_vb, &counts);
-
- SanityCheck(counts);
- ShowLargestFeatures(counts);
- Weights weights;
- weights.InitFromVector(counts);
- weights.WriteToFile(conf["output"].as<string>(), false);
-
- return 0;
-}
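
The deleted mr_em_train.cc did model loading, count accumulation, and the M-step in a single process; the new map/reduce pair above splits accumulation from normalization so counts can be shuffled by conditioning key. Both versions carry the same fallback digamma(); here is a quick self-contained check of it against the recurrence psi(x+1) = psi(x) + 1/x (a standard identity; sketch only):

#include <cassert>
#include <cmath>
#include <iostream>

// Fallback digamma(), copied verbatim from the files above.
double digamma(double x) {
  double result = 0, xx, xx2, xx4;
  assert(x > 0);
  for ( ; x < 7; ++x)
    result -= 1/x;
  x -= 1.0/2.0;
  xx = 1.0/x;
  xx2 = xx*xx;
  xx4 = xx2*xx2;
  result += log(x)+(1./24.)*xx2-(7.0/960.0)*xx4+(31.0/8064.0)*xx4*xx2-(127.0/30720.0)*xx4*xx4;
  return result;
}

int main() {
  // psi(x+1) - psi(x) should equal 1/x to within the accuracy of the
  // asymptotic series used for x >= 7.
  for (double x = 0.5; x < 20.0; x += 0.7) {
    assert(std::fabs(digamma(x + 1.0) - digamma(x) - 1.0 / x) < 1e-8);
  }
  std::cout << "digamma recurrence holds" << std::endl;
  return 0;
}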
diff --git a/training/mr_reduce_to_weights.cc b/training/mr_reduce_to_weights.cc
new file mode 100644
index 00000000..16b47720
--- /dev/null
+++ b/training/mr_reduce_to_weights.cc
@@ -0,0 +1,109 @@
+#include <iostream>
+#include <fstream>
+#include <vector>
+#include <cassert>
+
+#include <boost/program_options.hpp>
+#include <boost/program_options/variables_map.hpp>
+
+#include "filelib.h"
+#include "fdict.h"
+#include "weights.h"
+#include "sparse_vector.h"
+
+using namespace std;
+namespace po = boost::program_options;
+
+void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
+ po::options_description opts("Configuration options");
+ opts.add_options()
+ ("input_format,f",po::value<string>()->default_value("b64"),"Encoding of the input (b64 or text)")
+ ("input,i",po::value<string>()->default_value("-"),"Read file from")
+ ("output,o",po::value<string>()->default_value("-"),"Write weights to");
+ po::options_description clo("Command line options");
+ clo.add_options()
+ ("config", po::value<string>(), "Configuration file")
+ ("help,h", "Print this help message and exit");
+ po::options_description dconfig_options, dcmdline_options;
+ dconfig_options.add(opts);
+ dcmdline_options.add(opts).add(clo);
+
+ po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
+ if (conf->count("config")) {
+ ifstream config((*conf)["config"].as<string>().c_str());
+ po::store(po::parse_config_file(config, dconfig_options), *conf);
+ }
+ po::notify(*conf);
+
+ if (conf->count("help")) {
+ cerr << dcmdline_options << endl;
+ exit(1);
+ }
+}
+
+void WriteWeights(const SparseVector<double>& weights, ostream* out) {
+ for (SparseVector<double>::const_iterator it = weights.begin();
+ it != weights.end(); ++it) {
+ (*out) << FD::Convert(it->first) << " " << it->second << endl;
+ }
+}
+
+int main(int argc, char** argv) {
+ po::variables_map conf;
+ InitCommandLine(argc, argv, &conf);
+
+ const bool use_b64 = conf["input_format"].as<string>() == "b64";
+
+ const string s_obj = "**OBJ**";
+ // E-step
+ ReadFile rf(conf["input"].as<string>());
+ istream* in = rf.stream();
+ assert(*in);
+ WriteFile wf(conf["output"].as<string>());
+ ostream* out = wf.stream();
+ out->precision(17);
+ while(*in) {
+ string line;
+ getline(*in, line);
+ if (line.empty()) continue;
+ int feat;
+ double val;
+ size_t i = line.find("\t");
+ assert(i != string::npos);
+ ++i;
+ if (use_b64) {
+ SparseVector<double> g;
+ double obj;
+ if (!B64::Decode(&obj, &g, &line[i], line.size() - i)) {
+ cerr << "B64 decoder returned error, skipping!\n";
+ continue;
+ }
+ WriteWeights(g, out);
+ } else { // text encoding - your counts will not be accurate!
+ SparseVector<double> weights;
+ while (i < line.size()) {
+ size_t start = i;
+        while (i < line.size() && line[i] != '=') ++i;
+ if (i == line.size()) { cerr << "FORMAT ERROR\n"; break; }
+ string fname = line.substr(start, i - start);
+ if (fname == s_obj) {
+ feat = -1;
+ } else {
+ feat = FD::Convert(line.substr(start, i - start));
+ }
+ ++i;
+ start = i;
+        while (i < line.size() && line[i] != ';') ++i;
+ if (i - start == 0) continue;
+ val = atof(line.substr(start, i - start).c_str());
+ ++i;
+ if (feat != -1) {
+ weights.set_value(feat, val);
+ }
+ }
+ WriteWeights(weights, out);
+ }
+ }
+
+ return 0;
+}
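
A worked example of the shared text record format (values illustrative). Given this reducer-style input line:

0<TAB>**OBJ**=12.2;Feat1=2.3;Feat2=-0.2;

mr_reduce_to_weights drops the **OBJ** objective field and writes one feature per line to the weights file, e.g.

Feat1 2.3
Feat2 -0.2

(up to the 17-digit precision set on the output stream). As the in-code comments warn, the text encoding is lossy relative to b64, so it is meant for debugging rather than real training runs.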