author     Chris Dyer <cdyer@cs.cmu.edu>    2012-05-13 16:18:43 -0700
committer  Chris Dyer <cdyer@cs.cmu.edu>    2012-05-13 16:18:43 -0700
commit     b56da6f08c4f59b562a102671ac3deb135b0538a (patch)
tree       e97badb0a2e8c4ceff9468bcbc0d9458f935381c
parent     5d2fba19f7989b8a2c55834a5735f5fd5b60197c (diff)
fast creg training code for univariate linear and logistic regression
-rw-r--r--  BUILDING                        |  17
-rw-r--r--  training/Makefile.am            |   4
-rw-r--r--  training/creg.cc                | 334
-rw-r--r--  training/liblbfgs/lbfgs++.h     |  29
-rw-r--r--  training/liblbfgs/ll_test.cc    |   4
-rw-r--r--  utils/Makefile.am               |   4
-rw-r--r--  utils/fast_sparse_vector.h      |  16
-rw-r--r--  utils/json_feature_map_lexer.h  |  15
-rw-r--r--  utils/json_feature_map_lexer.ll | 132
9 files changed, 527 insertions, 28 deletions
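
Not part of the commit itself, but for orientation: a minimal sketch of the input the new creg binary reads and how it might be invoked, inferred from the option parser and the JSON feature-map lexer in the diff below. File names, feature names, labels, and the regularization value are hypothetical.

    train.feats.json -- one instance per line: <id> {"feature": value, ...}
    (this JSON feature-map layout is what the help strings call ARKRegression format)
        ex1 {"len": 3, "contains_the": 1}
        ex2 {"len": 7, "contains_the": 0}

    train.resp -- one response per line: <id> <label>
    (with --linear the response is a number instead of a label)
        ex1 positive
        ex2 negative

    hypothetical invocation: multiclass logistic regression with an L2 penalty
        ./training/creg -x train.feats.json -y train.resp --l2 1.0 > model.txt

The learned weights are written to stdout in the tab-separated layout produced at the end of main() in creg.cc.
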
diff --git a/BUILDING b/BUILDING
--- a/BUILDING
+++ b/BUILDING
@@ -1,6 +1,5 @@
 To build cdec, you'll need:
 
- * Google c++ testing framework (http://code.google.com/p/googletest/)
  * boost headers & boost program_options
    (you may need to install a package like boost-devel)
 
@@ -8,20 +7,14 @@ To build cdec, you'll need:
 Instructions for building
 -----------------------------------
 
-1) Optional: Download and build SRILM
-
-2) Download, build, and install Google Test (optional, this is necessary
-   to build unit tests that may be useful in development; system tests
-   work without it)
-
-3) Use automake / autoconf to generate the configure script.
+1) Use automake / autoconf to generate the configure script.
    I'm not an expert at using these tools, but this should be sufficient:
 
        autoreconf -ifv
 
-4) Configure and build. Your command will look something like this.
+2) Configure and build. Your command will look something like this.
 
-       ./configure --disable-gtest
+       ./configure
        make
 
 If you get errors during configure about missing BOOST macros, then step 3
@@ -36,12 +29,12 @@ Instructions for building
       make LIBS+="-lz -lboost_program_options" \
            CFLAGS+="-Wno-sign-compare"
 
-5) Test
+3) Test
 
       ./tests/run-system-tests.pl
 
       Everything should pass.
 
-6) Enjoy!
+4) Enjoy!
diff --git a/training/Makefile.am b/training/Makefile.am
index 991ac210..4b69ea94 100644
--- a/training/Makefile.am
+++ b/training/Makefile.am
@@ -1,5 +1,6 @@
 bin_PROGRAMS = \
   model1 \
+  creg \
  lbl_model \
   test_ngram \
   mr_em_map_adapter \
@@ -23,6 +24,9 @@ noinst_PROGRAMS = \
 
 TESTS = lbfgs_test optimize_test
 
+creg_SOURCES = creg.cc
+creg_LDADD = ./liblbfgs/liblbfgs.a $(top_srcdir)/utils/libutils.a -lz
+
 mpi_online_optimize_SOURCES = mpi_online_optimize.cc online_optimizer.cc
 mpi_online_optimize_LDADD = $(top_srcdir)/decoder/libcdec.a $(top_srcdir)/mteval/libmteval.a $(top_srcdir)/utils/libutils.a ../klm/lm/libklm.a ../klm/util/libklm_util.a -lz
diff --git a/training/creg.cc b/training/creg.cc
new file mode 100644
index 00000000..58adea00
--- /dev/null
+++ b/training/creg.cc
@@ -0,0 +1,334 @@
+#include <cstdlib>
+#include <iostream>
+#include <vector>
+#include <tr1/unordered_map>
+
+#include <boost/program_options.hpp>
+#include <boost/program_options/variables_map.hpp>
+
+#include "json_feature_map_lexer.h"
+#include "prob.h"
+#include "filelib.h"
+#include "weights.h"
+#include "sparse_vector.h"
+#include "liblbfgs/lbfgs++.h"
+
+using namespace std;
+using namespace std::tr1;
+namespace po = boost::program_options;
+
+void InitCommandLine(int argc, char** argv, po::variables_map* conf) {
+  po::options_description opts("Configuration options");
+  opts.add_options()
+        ("training_features,x", po::value<string>(), "File containing training instance features (ARKRegression format)")
+        ("training_responses,y", po::value<string>(), "File containing training response features (ARKRegression format)")
+        ("linear,n", "Linear (rather than logistic) regression")
+        ("l1",po::value<double>()->default_value(0.0), "l_1 regularization strength")
+        ("l2",po::value<double>()->default_value(0.0), "l_2 regularization strength")
+        ("weights,w", po::value<string>(), "Initial weights")
+        ("epsilon,e", po::value<double>()->default_value(1e-4), "Epsilon for convergence test. Terminates when ||g|| < epsilon * max(1, ||x||)")
+        ("memory_buffers,m",po::value<unsigned>()->default_value(40), "Number of memory buffers for LBFGS")
+        ("help,h", "Help");
+  po::options_description dcmdline_options;
+  dcmdline_options.add(opts);
+  po::store(parse_command_line(argc, argv, dcmdline_options), *conf);
+  if (conf->count("help") || !conf->count("training_features") || !conf->count("training_responses")) {
+    cerr << dcmdline_options << endl;
+    exit(1);
+  }
+}
+
+struct TrainingInstance {
+  SparseVector<float> x;
+  union {
+    unsigned label;  // for categorical predictions
+    float value;     // for continuous predictions
+  } y;
+};
+
+struct ReaderHelper {
+  explicit ReaderHelper(vector<TrainingInstance>* xyp) : xy_pairs(xyp), lc(), flag() {}
+  unordered_map<string, unsigned> id2ind;
+  vector<TrainingInstance>* xy_pairs;
+  int lc;
+  bool flag;
+};
+
+void ReaderCB(const string& id, const SparseVector<float>& fmap, void* extra) {
+  ReaderHelper& rh = *reinterpret_cast<ReaderHelper*>(extra);
+  ++rh.lc;
+  if (rh.lc % 1000 == 0) { cerr << '.'; rh.flag = true; }
+  if (rh.lc % 40000 == 0) { cerr << " [" << rh.lc << "]\n"; rh.flag = false; }
+  const unordered_map<string, unsigned>::iterator it = rh.id2ind.find(id);
+  if (it == rh.id2ind.end()) {
+    cerr << "Unlabeled example in line " << rh.lc << endl;
+    abort();
+  }
+  (*rh.xy_pairs)[it->second - 1].x = fmap;
+}
+
+void ReadLabeledInstances(const string& ffeats,
+                          const string& fresp,
+                          const bool is_continuous,
+                          vector<TrainingInstance>* xy_pairs,
+                          vector<string>* labels) {
+  bool flag = false;
+  xy_pairs->clear();
+  int lc = 0;
+  ReaderHelper rh(xy_pairs);
+  unordered_map<string, unsigned> label2id;
+  cerr << "Reading training responses from " << fresp << " ..." << endl;
+  ReadFile fr(fresp);
+  for (unsigned i = 0; i < labels->size(); ++i)
+    label2id[(*labels)[i]] = i;
+  istream& in = *fr.stream();
+  string line;
+  while(getline(in, line)) {
+    ++lc;
+    if (lc % 1000 == 0) { cerr << '.'; flag = true; }
+    if (lc % 40000 == 0) { cerr << " [" << lc << "]\n"; flag = false; }
+    if (line.size() == 0) continue;
+    if (line[0] == '#') continue;
+    unsigned p = 0;
+    while (p < line.size() && line[p] != ' ' && line[p] != '\t') { ++p; }
+    unsigned& ind = rh.id2ind[line.substr(0, p)];
+    if (ind != 0) { cerr << "ID " << line.substr(0, p) << " duplicated in line " << lc << endl; abort(); }
+    while (p < line.size() && (line[p] == ' ' || line[p] == '\t')) { ++p; }
+    assert(p < line.size());
+    xy_pairs->push_back(TrainingInstance());
+    ind = xy_pairs->size();
+    if (is_continuous) {
+      xy_pairs->back().y.value = strtof(&line[p], 0);
+    } else { // categorical predictions
+      unordered_map<string, unsigned>::iterator it = label2id.find(line.substr(p));
+      if (it == label2id.end()) {
+        const string label = line.substr(p);
+        it = label2id.insert(make_pair(label, labels->size())).first;
+        labels->push_back(label);
+      }
+      xy_pairs->back().y.label = it->second;  // label id
+    }
+  }
+  if (flag) cerr << endl;
+  if (!is_continuous) {
+    cerr << "LABELS:";
+    for (unsigned j = 0; j < labels->size(); ++j)
+      cerr << " " << (*labels)[j];
+    cerr << endl;
+  }
+  cerr << "Reading training features from " << ffeats << " ..." << endl;
+  ReadFile ff(ffeats);
+  JSONFeatureMapLexer::ReadRules(ff.stream(), ReaderCB, &rh);
+  if (rh.flag) cerr << endl;
+}
+
+// helper base class (not polymorphic- just a container and some helper functions) for loss functions
+// real loss functions should implement double operator()(const vector<double>& x, double* g),
+// which should evaluate f(x) and g = f'(x)
+struct BaseLoss {
+  // dimp1 = number of categorial outputs possible for logistic regression
+  // for linear regression, it should be 1 more than the dimension of the response variable
+  BaseLoss(
+      const vector<TrainingInstance>& tr,
+      unsigned dimp1,
+      unsigned numfeats,
+      unsigned ll2) : training(tr), K(dimp1), p(numfeats), l2(ll2) {}
+
+  // weight vector layout for K classes, with p features
+  //   w[0 : K-1] = bias weights
+  //   w[y*p + K : y*p + K + p - 1] = feature weights for y^th class
+  // this representation is used in ComputeDotProducts and GradAdd
+  void ComputeDotProducts(const SparseVector<float>& fx,  // feature vector of x
+                          const vector<double>& w,        // full weight vector
+                          vector<double>* pdotprods) const {
+    vector<double>& dotprods = *pdotprods;
+    const unsigned km1 = K - 1;
+    dotprods.resize(km1);
+    for (unsigned y = 0; y < km1; ++y)
+      dotprods[y] = w[y];  // bias terms
+    for (SparseVector<float>::const_iterator it = fx.begin(); it != fx.end(); ++it) {
+      const float fval = it->second;
+      const unsigned fid = it->first;
+      for (unsigned y = 0; y < km1; ++y)
+        dotprods[y] += w[fid + y * p + km1] * fval;
+    }
+  }
+
+  double ApplyRegularizationTerms(const vector<double>& weights,
+                                  double* g) const {
+    double reg = 0;
+    for (size_t i = K - 1; i < weights.size(); ++i) {
+      const double& w_i = weights[i];
+      reg += l2 * w_i * w_i;
+      g[i] += 2 * l2 * w_i;
+    }
+    return reg;
+  }
+
+  void GradAdd(const SparseVector<float>& fx,
+               const unsigned y,
+               const double scale,
+               double* acc) const {
+    acc[y] += scale;  // class bias
+    for (SparseVector<float>::const_iterator it = fx.begin();
+         it != fx.end(); ++it)
+      acc[it->first + y * p + K - 1] += it->second * scale;
+  }
+
+  const vector<TrainingInstance>& training;
+  const unsigned K, p;
+  const double l2;
+};
+
+struct UnivariateSquaredLoss : public BaseLoss {
+  UnivariateSquaredLoss(
+      const vector<TrainingInstance>& tr,
+      unsigned numfeats,
+      const double l2) : BaseLoss(tr, 2, numfeats, l2) {}
+
+  // evaluate squared loss and gradient
+  double operator()(const vector<double>& x, double* g) const {
+    fill(g, g + x.size(), 0.0);
+    double cll = 0;
+    vector<double> dotprods(1);  // univariate prediction
+    for (int i = 0; i < training.size(); ++i) {
+      const SparseVector<float>& fmapx = training[i].x;
+      const double refy = training[i].y.value;
+      ComputeDotProducts(fmapx, x, &dotprods);
+      double diff = dotprods[0] - refy;
+      cll += diff * diff;
+
+      double scale = 2 * diff;
+      GradAdd(fmapx, 0, scale, g);
+    }
+    double reg = ApplyRegularizationTerms(x, g);
+    return cll + reg;
+  }
+};
+
+struct MulticlassLogLoss : public BaseLoss {
+  MulticlassLogLoss(
+      const vector<TrainingInstance>& tr,
+      unsigned k,
+      unsigned numfeats,
+      const double l2) : BaseLoss(tr, k, numfeats, l2) {}
+
+  // evaluate log loss and gradient
+  double operator()(const vector<double>& x, double* g) const {
+    fill(g, g + x.size(), 0.0);
+    vector<double> dotprods(K - 1);  // K-1 degrees of freedom
+    vector<prob_t> probs(K);
+    double cll = 0;
+    for (int i = 0; i < training.size(); ++i) {
+      const SparseVector<float>& fmapx = training[i].x;
+      const unsigned refy = training[i].y.label;
+      //cerr << "FMAP: " << fmapx << endl;
+      ComputeDotProducts(fmapx, x, &dotprods);
+      prob_t z;
+      for (unsigned j = 0; j < dotprods.size(); ++j)
+        z += (probs[j] = prob_t(dotprods[j], init_lnx()));
+      z += (probs.back() = prob_t::One());
+      for (unsigned y = 0; y < probs.size(); ++y) {
+        probs[y] /= z;
+        //cerr << "  p(y=" << y << ")=" << probs[y].as_float() << "\tz=" << z << endl;
+      }
+      cll -= log(probs[refy]);  // log p(y | x)
+
+      for (unsigned y = 0; y < dotprods.size(); ++y) {
+        double scale = probs[y].as_float();
+        if (y == refy) { scale -= 1.0; }
+        GradAdd(fmapx, y, scale, g);
+      }
+    }
+    double reg = ApplyRegularizationTerms(x, g);
+    return cll + reg;
+  }
+};
+
+template <class LossFunction>
+double LearnParameters(LossFunction& loss,
+                       const double l1,
+                       const unsigned l1_start,
+                       const unsigned memory_buffers,
+                       const double eps,
+                       vector<double>* px) {
+  LBFGS<LossFunction> lbfgs(px, loss, memory_buffers, l1, l1_start, eps);
+  lbfgs.MinimizeFunction();
+  return 0;
+}
+
+int main(int argc, char** argv) {
+  po::variables_map conf;
+  InitCommandLine(argc, argv, &conf);
+  string line;
+  vector<TrainingInstance> training;
+  const string xfile = conf["training_features"].as<string>();
+  const string yfile = conf["training_responses"].as<string>();
+  double l1 = conf["l1"].as<double>();
+  double l2 = conf["l2"].as<double>();
+  const unsigned memory_buffers = conf["memory_buffers"].as<unsigned>();
+  const double epsilon = conf["epsilon"].as<double>();
+  if (l1 < 0.0) {
+    cerr << "L1 strength must be >= 0\n";
+    return 1;
+  }
+  if (l2 < 0.0) {
+    cerr << "L2 strength must be >= 0\n";
+    return 2;
+  }
+
+  const bool is_continuous = conf.count("linear");
+  vector<string> labels; // only populated for non-continuous models
+  ReadLabeledInstances(xfile, yfile, is_continuous, &training, &labels);
+
+  if (conf.count("weights")) {
+    cerr << "Initial weights are not implemented, please implement." << endl;
+    // TODO read weights for categorical and continuous predictions
+    // can't use normal cdec weight framework
+    abort();
+  }
+
+  cerr << "         Number of features: " << FD::NumFeats() << endl;
+  cerr << "Number of training examples: " << training.size() << endl;
+  const unsigned p = FD::NumFeats();
+  cout.precision(15);
+
+  if (conf.count("linear")) {  // linear regression
+    vector<double> weights(1 + FD::NumFeats(), 0.0);
+    cerr << "       Number of parameters: " << weights.size() << endl;
+    UnivariateSquaredLoss loss(training, p, l2);
+    LearnParameters(loss, l1, 1, memory_buffers, epsilon, &weights);
+    cout << p << "\t***CONTINUOUS***" << endl;
+    cout << "***BIAS***\t" << weights[0] << endl;
+    for (unsigned f = 0; f < p; ++f) {
+      const double w = weights[1 + f];
+      if (w)
+        cout << FD::Convert(f) << "\t" << w << endl;
+    }
+  } else {                     // logistic regression
+    vector<double> weights((1 + FD::NumFeats()) * (labels.size() - 1), 0.0);
+    cerr << "       Number of parameters: " << weights.size() << endl;
+    cerr << "           Number of labels: " << labels.size() << endl;
+    const unsigned K = labels.size();
+    const unsigned km1 = K - 1;
+    MulticlassLogLoss loss(training, K, p, l2);
+    LearnParameters(loss, l1, km1, memory_buffers, epsilon, &weights);
+
+    cout << p << "\t***CATEGORICAL***";
+    for (unsigned y = 0; y < K; ++y)
+      cout << '\t' << labels[y];
+    cout << endl;
+    for (unsigned y = 0; y < km1; ++y)
+      cout << labels[y] << "\t***BIAS***\t" << weights[y] << endl;
+    for (unsigned y = 0; y < km1; ++y) {
+      for (unsigned f = 0; f < p; ++f) {
+        const double w = weights[km1 + y * p + f];
+        if (w)
+          cout << labels[y] << "\t" << FD::Convert(f) << "\t" << w << endl;
+      }
+    }
+  }
+
+  return 0;
+}
diff --git a/training/liblbfgs/lbfgs++.h b/training/liblbfgs/lbfgs++.h
index 342f9b0e..92ead955 100644
--- a/training/liblbfgs/lbfgs++.h
+++ b/training/liblbfgs/lbfgs++.h
@@ -16,28 +16,33 @@ template <typename Function>
 class LBFGS {
  public:
-  LBFGS(size_t n,              // number of variables
-        const Function& f,     // function to optimize
-        double l1_c = 0.0,     // l1 penalty strength
-        size_t m = 10          // number of memory buffers
-        // TODO should use custom allocator here:
+  LBFGS(size_t n,               // number of variables
+        const Function& f,      // function to optimize
+        size_t m = 10,          // number of memory buffers
+        double l1_c = 0.0,      // l1 penalty strength
+        unsigned l1_start = 0,  // l1 penalty starting index
+        double eps = 1e-5       // convergence epsilon
+        // TODO should use custom allocator here:
        ) : p_x(new std::vector<lbfgsfloatval_t>(n, 0.0)),
            owned(true),
            m_x(*p_x),
            func(f) {
-    Init(m, l1_c);
+    Init(m, l1_c, l1_start, eps);
   }
 
   // constructor where external vector storage for variables is used
   LBFGS(std::vector<lbfgsfloatval_t>* px,
         const Function& f,
-        double l1_c = 0.0,     // l1 penalty strength
-        size_t m = 10
+        size_t m = 10,          // number of memory buffers
+        double l1_c = 0.0,      // l1 penalty strength
+        unsigned l1_start = 0,  // l1 penalty starting index
+        double eps = 1e-5       // convergence epsilon
+        // TODO should use custom allocator here:
        ) : p_x(px),
            owned(false),
           m_x(*p_x),
           func(f) {
-    Init(m, l1_c);
+    Init(m, l1_c, l1_start, eps);
   }
 
   ~LBFGS() {
@@ -60,12 +65,14 @@ class LBFGS {
   }
 
  private:
-  void Init(size_t m, double l1_c) {
+  void Init(size_t m, double l1_c, unsigned l1_start, double eps) {
     lbfgs_parameter_init(&param);
     param.m = m;
+    param.epsilon = eps;
     if (l1_c > 0.0) {
      param.linesearch = LBFGS_LINESEARCH_BACKTRACKING;
-      param.orthantwise_c = 1.0;
+      param.orthantwise_c = l1_c;
+      param.orthantwise_start = l1_start;
     }
     silence = false;
   }
diff --git a/training/liblbfgs/ll_test.cc b/training/liblbfgs/ll_test.cc
index 43c0f214..48bc0366 100644
--- a/training/liblbfgs/ll_test.cc
+++ b/training/liblbfgs/ll_test.cc
@@ -5,7 +5,7 @@ using namespace std;
 
 // Function must be lbfgsfloatval_t f(x.begin, x.end, g.begin)
 lbfgsfloatval_t func(const vector<lbfgsfloatval_t>& x, lbfgsfloatval_t* g) {
-  int i;
+  unsigned i;
   lbfgsfloatval_t fx = 0.0;
 
   for (i = 0;i < x.size();i += 2) {
@@ -24,7 +24,7 @@ void Opt(F& f) {
   lbfgs.MinimizeFunction();
 }
 
-int main(int argc, char** argv) {
+int main() {
   Opt(func);
   return 0;
 }
diff --git a/utils/Makefile.am b/utils/Makefile.am
index 46650c75..b7da0f06 100644
--- a/utils/Makefile.am
+++ b/utils/Makefile.am
@@ -33,12 +33,16 @@ libutils_a_SOURCES = \
   sparse_vector.cc \
   timing_stats.cc \
   verbose.cc \
+  json_feature_map_lexer.cc \
   weights.cc
 
 if HAVE_CMPH
   libutils_a_SOURCES += perfect_hash.cc
 endif
 
+json_feature_map_lexer.cc: json_feature_map_lexer.ll
+	$(LEX) -s -8 -CF -o$@ $<
+
 phmt_SOURCES = phmt.cc
 ts_SOURCES = ts.cc
 m_test_SOURCES = m_test.cc
diff --git a/utils/fast_sparse_vector.h b/utils/fast_sparse_vector.h
index af832950..68caa704 100644
--- a/utils/fast_sparse_vector.h
+++ b/utils/fast_sparse_vector.h
@@ -7,8 +7,6 @@
 // important: indexes are integers
 // important: iterators may return elements in any order
 
-#include "config.h"
-
 #include <cmath>
 #include <cstring>
 #include <climits>
@@ -16,8 +14,9 @@
 #include <cassert>
 #include <vector>
 
-#include <boost/static_assert.hpp>
+#include "config.h"
 
+#include <boost/static_assert.hpp>
 #if HAVE_BOOST_ARCHIVE_TEXT_OARCHIVE_HPP
 #include <boost/serialization/map.hpp>
 #endif
@@ -119,6 +118,17 @@ class FastSparseVector {
     std::memcpy(this, &other, sizeof(FastSparseVector));
     if (is_remote_) data_.rbmap = new std::map<int, T>(*data_.rbmap);
   }
+  FastSparseVector(std::pair<int, T>* first, std::pair<int, T>* last) {
+    const ptrdiff_t n = last - first;
+    if (n <= LOCAL_MAX) {
+      is_remote_ = false;
+      local_size_ = n;
+      std::memcpy(data_.local, first, sizeof(std::pair<int, T>) * n);
+    } else {
+      is_remote_ = true;
+      data_.rbmap = new std::map<int, T>(first, last);
+    }
+  }
   void erase(int k) {
     if (is_remote_) {
       data_.rbmap->erase(k);
diff --git a/utils/json_feature_map_lexer.h b/utils/json_feature_map_lexer.h
new file mode 100644
index 00000000..3324aa29
--- /dev/null
+++ b/utils/json_feature_map_lexer.h
@@ -0,0 +1,15 @@
+#ifndef _RULE_LEXER_H_
+#define _RULE_LEXER_H_
+
+#include <iostream>
+#include <string>
+
+#include "sparse_vector.h"
+
+struct JSONFeatureMapLexer {
+  typedef void (*FeatureMapCallback)(const std::string& id, const SparseVector<float>& fmap, void* extra);
+  static void ReadRules(std::istream* in, FeatureMapCallback func, void* extra);
+};
+
+#endif
+
diff --git a/utils/json_feature_map_lexer.ll b/utils/json_feature_map_lexer.ll
new file mode 100644
index 00000000..372b52f5
--- /dev/null
+++ b/utils/json_feature_map_lexer.ll
@@ -0,0 +1,132 @@
+%option nounput
+%{
+
+#include "json_feature_map_lexer.h"
+#include "fdict.h"
+#include "fast_sparse_vector.h"
+
+#define YY_DECL int json_fmap_yylex (void)
+#undef YY_INPUT
+#define YY_INPUT(buf, result, max_size) (result = jfmap_stream->read(buf, max_size).gcount())
+#define YY_SKIP_YYWRAP 1
+int yywrap() { return 1; }
+
+JSONFeatureMapLexer::FeatureMapCallback json_fmap_callback = NULL;
+void* json_fmap_callback_extra = NULL;
+std::istream* jfmap_stream = NULL;
+bool fl = true;
+unsigned spos = 0;
+char featname[16000];
+#define MAX_FEATS 20000
+std::pair<int, float> featmap[MAX_FEATS];
+unsigned curfeat = 0;
+std::string instid;
+
+inline unsigned unicode_escape_to_utf8(uint16_t w1, uint16_t w2, char* putf8) {
+  uint32_t cp;
+  if((w1 & 0xfc00) == 0xd800) {
+    if((w2 & 0xfc00) == 0xdc00) {
+      cp = 0x10000 + (((static_cast<uint32_t>(w1) & 0x3ff) << 10) | (w2 & 0x3ff));
+    } else {
+      abort();
+    }
+  } else {
+    cp = w1;
+  }
+
+
+  if(cp < 0x80) {
+    putf8[0] = static_cast<char>(cp);
+    return 1;
+  } else if(cp < 0x0800) {
+    putf8[0] = 0xc0 | ((cp >> 6) & 0x1f);
+    putf8[1] = 0x80 | (cp & 0x3f);
+    return 2;
+  } else if(cp < 0x10000) {
+    putf8[0] = 0xe0 | ((cp >> 6) & 0x0f);
+    putf8[1] = 0x80 | ((cp >> 6) & 0x3f);
+    putf8[2] = 0x80 | (cp & 0x3f);
+    return 3;
+  } else if(cp < 0x1fffff) {
+    putf8[0] = 0xf0 | ((cp >> 18) & 0x07);
+    putf8[1] = 0x80 | ((cp >> 12) & 0x3f);
+    putf8[2] = 0x80 | ((cp >> 6) & 0x3f);
+    putf8[3] = 0x80 | (cp & 0x3f);
+    return 4;
+  } else {
+    abort();
+  }
+  return 0;
+}
+
+%}
+
+ID [A-Za-z_0-9]+
+HEX_D [a-fA-F0-9]
+INT [-]?[0-9]+
+DOUBLE {INT}((\.[0-9]+)?([eE][-+]?[0-9]+)?)
+WS [ \t\r\n]
+LCB [{]
+RCB [}]
+UNESCAPED_CH [^\"\\\b\n\r\f\t]
+
+%x JSON PREVAL STRING JSONVAL POSTVAL DOUBLE
+%%
+
+<INITIAL>{ID} { instid = yytext; BEGIN(JSON); }
+
+<JSON>{WS}*{LCB}{WS}* { BEGIN(PREVAL); }
+
+<PREVAL>\" { BEGIN(STRING); spos=0; }
+
+<STRING>\" { featname[spos] = 0;
+             featmap[curfeat].first = FD::Convert(featname);
+             BEGIN(JSONVAL);
+           }
+<STRING>{UNESCAPED_CH} { featname[spos++] = yytext[0]; }
+<STRING>\\\" { featname[spos++] = '"'; }
+<STRING>\\\\ { featname[spos++] = '\\'; }
+<STRING>\\\/ { featname[spos++] = '/'; }
+<STRING>\\b { }
+<STRING>\\f { }
+<STRING>\\n { }
+<STRING>\\r { }
+<STRING>\\t { }
+<STRING>\\u{HEX_D}{HEX_D}{HEX_D}{HEX_D} { abort();
+                                        }
+
+<JSONVAL>{WS}*:{WS}* { BEGIN(DOUBLE); }
+<DOUBLE>{DOUBLE} { featmap[curfeat++].second = strtod(yytext, 0);
+                   BEGIN(POSTVAL); }
+
+<POSTVAL>{WS}*,{WS}* { BEGIN(PREVAL); }
+<POSTVAL>{WS}*{RCB}\n* {
+                         const SparseVector<float> x(&featmap[0], &featmap[curfeat]);
+                         json_fmap_callback(instid, x, json_fmap_callback_extra);
+                         curfeat = 0;
+                         BEGIN(INITIAL);
+                       }
+
+<PREVAL,POSTVAL,DOUBLE,JSONVAL,INITIAL>. { std::cerr << "bad input: " << yytext << std::endl; abort(); }
+
+%%
+
+void JSONFeatureMapLexer::ReadRules(std::istream* in, FeatureMapCallback func, void* extra) {
+  json_fmap_callback = func;
+  json_fmap_callback_extra = extra;
+  jfmap_stream = in;
+  json_fmap_yylex();
+}
+
+#if 0
+void cb(const std::string& id, const SparseVector<float>& fmap, void* extra) {
+  (void) extra;
+  static int cc = 0;
+  cc++;
+}
+
+int main() {
+  JSONFeatureMapLexer::ReadRules(&std::cin, cb, NULL);
+}
+#endif
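
Also not part of the commit: a sketch of the two objectives the loss classes above evaluate, assuming x_i denotes an instance's feature vector, y_i its response, and \lambda_2 the --l2 strength. Bias terms are excluded from the regularizer (matching ApplyRegularizationTerms), and any --l1 penalty is applied inside liblbfgs via the orthant-wise (OWL-QN) settings rather than in operator().

    UnivariateSquaredLoss (--linear):
        F(w, b) = \sum_i (w \cdot x_i + b - y_i)^2 + \lambda_2 \|w\|_2^2
        \nabla_w F = \sum_i 2 (w \cdot x_i + b - y_i)\, x_i + 2 \lambda_2 w

    MulticlassLogLoss (K labels, with class K pinned at w_K = 0, b_K = 0):
        p(y \mid x_i) = \exp(w_y \cdot x_i + b_y) / \sum_{y'} \exp(w_{y'} \cdot x_i + b_{y'})
        F(W, b) = -\sum_i \log p(y_i \mid x_i) + \lambda_2 \sum_{y < K} \|w_y\|_2^2
        \partial F / \partial w_y = \sum_i (p(y \mid x_i) - \mathbf{1}[y = y_i])\, x_i + 2 \lambda_2 w_y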